repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/varnet/functions/data/test_transforms.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import pytest
import torch
from common import utils
from common.subsample import RandomMaskFunc
from data import transforms
def create_input(shape):
input = np.arange(np.product(shape)).reshape(shape)
input = torch.from_numpy(input).float()
return input
@pytest.mark.parametrize('shape, center_fractions, accelerations', [
([4, 32, 32, 2], [0.08], [4]),
([2, 64, 64, 2], [0.04, 0.08], [8, 4]),
])
def test_apply_mask(shape, center_fractions, accelerations):
mask_func = RandomMaskFunc(center_fractions, accelerations)
expected_mask = mask_func(shape, seed=123)
input = create_input(shape)
output, mask = transforms.apply_mask(input, mask_func, seed=123)
assert output.shape == input.shape
assert mask.shape == expected_mask.shape
assert np.all(expected_mask.numpy() == mask.numpy())
assert np.all(np.where(mask.numpy() == 0, 0, output.numpy()) == output.numpy())
@pytest.mark.parametrize('shape', [
[3, 3],
[4, 6],
[10, 8, 4],
])
def test_fft2(shape):
shape = shape + [2]
input = create_input(shape)
out_torch = transforms.fft2(input).numpy()
out_torch = out_torch[..., 0] + 1j * out_torch[..., 1]
input_numpy = utils.tensor_to_complex_np(input)
input_numpy = np.fft.ifftshift(input_numpy, (-2, -1))
out_numpy = np.fft.fft2(input_numpy, norm='ortho')
out_numpy = np.fft.fftshift(out_numpy, (-2, -1))
assert np.allclose(out_torch, out_numpy)
@pytest.mark.parametrize('shape', [
[3, 3],
[4, 6],
[10, 8, 4],
])
def test_ifft2(shape):
shape = shape + [2]
input = create_input(shape)
out_torch = transforms.ifft2(input).numpy()
out_torch = out_torch[..., 0] + 1j * out_torch[..., 1]
input_numpy = utils.tensor_to_complex_np(input)
input_numpy = np.fft.ifftshift(input_numpy, (-2, -1))
out_numpy = np.fft.ifft2(input_numpy, norm='ortho')
out_numpy = np.fft.fftshift(out_numpy, (-2, -1))
assert np.allclose(out_torch, out_numpy)
@pytest.mark.parametrize('shape', [
[3, 3],
[4, 6],
[10, 8, 4],
])
def test_complex_abs(shape):
shape = shape + [2]
input = create_input(shape)
out_torch = transforms.complex_abs(input).numpy()
input_numpy = utils.tensor_to_complex_np(input)
out_numpy = np.abs(input_numpy)
assert np.allclose(out_torch, out_numpy)
@pytest.mark.parametrize('shape, dim', [
[[3, 3], 0],
[[4, 6], 1],
[[10, 8, 4], 2],
])
def test_root_sum_of_squares(shape, dim):
input = create_input(shape)
out_torch = transforms.root_sum_of_squares(input, dim).numpy()
out_numpy = np.sqrt(np.sum(input.numpy() ** 2, dim))
assert np.allclose(out_torch, out_numpy)
@pytest.mark.parametrize('shape, target_shape', [
[[10, 10], [4, 4]],
[[4, 6], [2, 4]],
[[8, 4], [4, 4]],
])
def test_center_crop(shape, target_shape):
input = create_input(shape)
out_torch = transforms.center_crop(input, target_shape).numpy()
assert list(out_torch.shape) == target_shape
@pytest.mark.parametrize('shape, target_shape', [
[[10, 10], [4, 4]],
[[4, 6], [2, 4]],
[[8, 4], [4, 4]],
])
def test_complex_center_crop(shape, target_shape):
shape = shape + [2]
input = create_input(shape)
out_torch = transforms.complex_center_crop(input, target_shape).numpy()
assert list(out_torch.shape) == target_shape + [2, ]
@pytest.mark.parametrize('shape, mean, stddev', [
[[10, 10], 0, 1],
[[4, 6], 4, 10],
[[8, 4], 2, 3],
])
def test_normalize(shape, mean, stddev):
input = create_input(shape)
output = transforms.normalize(input, mean, stddev).numpy()
assert np.isclose(output.mean(), (input.numpy().mean() - mean) / stddev)
assert np.isclose(output.std(), input.numpy().std() / stddev)
@pytest.mark.parametrize('shape', [
[10, 10],
[20, 40, 30],
])
def test_normalize_instance(shape):
input = create_input(shape)
output, mean, stddev = transforms.normalize_instance(input)
output = output.numpy()
assert np.isclose(input.numpy().mean(), mean, rtol=1e-2)
assert np.isclose(input.numpy().std(), stddev, rtol=1e-2)
assert np.isclose(output.mean(), 0, rtol=1e-2, atol=1e-3)
assert np.isclose(output.std(), 1, rtol=1e-2, atol=1e-3)
@pytest.mark.parametrize('shift, dim', [
(0, 0),
(1, 0),
(-1, 0),
(100, 0),
((1, 2), (1, 2)),
])
@pytest.mark.parametrize('shape', [
[5, 6, 2],
[3, 4, 5],
])
def test_roll(shift, dim, shape):
input = np.arange(np.product(shape)).reshape(shape)
out_torch = transforms.roll(torch.from_numpy(input), shift, dim).numpy()
out_numpy = np.roll(input, shift, dim)
assert np.allclose(out_torch, out_numpy)
@pytest.mark.parametrize('shape', [
[5, 3],
[2, 4, 6],
])
def test_fftshift(shape):
input = np.arange(np.product(shape)).reshape(shape)
out_torch = transforms.fftshift(torch.from_numpy(input)).numpy()
out_numpy = np.fft.fftshift(input)
assert np.allclose(out_torch, out_numpy)
@pytest.mark.parametrize('shape', [
[5, 3],
[2, 4, 5],
[2, 7, 5],
])
def test_ifftshift(shape):
input = np.arange(np.product(shape)).reshape(shape)
out_torch = transforms.ifftshift(torch.from_numpy(input)).numpy()
out_numpy = np.fft.ifftshift(input)
assert np.allclose(out_torch, out_numpy)
| 5,497 | 28.244681 | 83 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/varnet/functions/data/transforms.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch
def to_tensor(data):
"""
Convert numpy array to PyTorch tensor. For complex arrays, the real and imaginary parts
are stacked along the last dimension.
Args:
data (np.array): Input numpy array
Returns:
torch.Tensor: PyTorch version of data
"""
if np.iscomplexobj(data):
data = np.stack((data.real, data.imag), axis=-1)
return torch.from_numpy(data)
def apply_mask(data, mask_func, seed=None, padding=None):
"""
Subsample given k-space by multiplying with a mask.
Args:
data (torch.Tensor): The input k-space data. This should have at least 3 dimensions, where
dimensions -3 and -2 are the spatial dimensions, and the final dimension has size
2 (for complex values).
mask_func (callable): A function that takes a shape (tuple of ints) and a random
number seed and returns a mask.
seed (int or 1-d array_like, optional): Seed for the random number generator.
Returns:
(tuple): tuple containing:
masked data (torch.Tensor): Subsampled k-space data
mask (torch.Tensor): The generated mask
"""
shape = np.array(data.shape)
shape[:-3] = 1
mask = mask_func(shape, seed)
if padding is not None:
mask[:, :, :padding[0]] = 0
mask[:, :, padding[1]:] = 0 # padding value inclusive on right of zeros
masked_data = data * mask + 0.0 # The + 0.0 removes the sign of the zeros
return masked_data, mask
def mask_center(x, mask_from, mask_to):
b, c, h, w, two = x.shape
mask = torch.zeros_like(x)
mask[:, :, :, mask_from:mask_to] = x[:, :, :, mask_from:mask_to]
return mask
def complex_mul(x, y):
assert x.shape[-1] == y.shape[-1] == 2
re = x[..., 0] * y[..., 0] - x[..., 1] * y[..., 1]
im = x[..., 0] * y[..., 1] + x[..., 1] * y[..., 0]
return torch.stack((re, im), dim=-1)
def complex_conj(x):
assert x.shape[-1] == 2
return torch.stack((x[..., 0], -x[..., 1]), dim=-1)
'''
def fft2(data):
"""
Apply centered 2 dimensional Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The FFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = torch.fft(data, 2, normalized=True)
data = fftshift(data, dim=(-3, -2))
return data
def ifft2(data):
"""
Apply centered 2-dimensional Inverse Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The IFFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = torch.ifft(data, 2, normalized=True)
data = fftshift(data, dim=(-3, -2))
return data
'''
def complex_abs(data):
"""
Compute the absolute value of a complex valued input tensor.
Args:
data (torch.Tensor): A complex valued tensor, where the size of the final dimension
should be 2.
Returns:
torch.Tensor: Absolute value of data
"""
assert data.size(-1) == 2
return (data ** 2).sum(dim=-1).sqrt()
def complex_abs_np(data): ############################################### MZD
"""
Compute the absolute value of a complex valued input tensor.
Args:
data (torch.Tensor): A complex valued tensor, where the size of the final dimension
should be 2.
Returns:
torch.Tensor: Absolute value of data
"""
assert data.shape[-1] == 2
return np.sqrt( (data ** 2).sum(axis=-1) )
def complex_abs_sq(data):
"""
Compute the squared absolute value of a complex tensor
"""
assert data.size(-1) == 2
return (data ** 2).sum(dim=-1)
def root_sum_of_squares(data, dim=0):
"""
Compute the Root Sum of Squares (RSS) transform along a given dimension of a tensor.
Args:
data (torch.Tensor): The input tensor
dim (int): The dimensions along which to apply the RSS transform
Returns:
torch.Tensor: The RSS value
"""
return torch.sqrt((data ** 2).sum(dim))
def root_sum_of_squares_complex(data, dim=0):
"""
Compute the Root Sum of Squares (RSS) transform along a given dimension of a tensor.
Args:
data (torch.Tensor): The input tensor
dim (int): The dimensions along which to apply the RSS transform
Returns:
torch.Tensor: The RSS value
"""
return torch.sqrt(complex_abs_sq(data).sum(dim))
def center_crop(data, shape):
"""
Apply a center crop to the input real image or batch of real images.
Args:
data (torch.Tensor): The input tensor to be center cropped. It should have at
least 2 dimensions and the cropping is applied along the last two dimensions.
shape (int, int): The output shape. The shape should be smaller than the
corresponding dimensions of data.
Returns:
torch.Tensor: The center cropped image
"""
assert 0 < shape[0] <= data.shape[-2]
assert 0 < shape[1] <= data.shape[-1]
w_from = (data.shape[-2] - shape[0]) // 2
h_from = (data.shape[-1] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
return data[..., w_from:w_to, h_from:h_to]
def complex_center_crop(data, shape):
"""
Apply a center crop to the input image or batch of complex images.
Args:
data (torch.Tensor): The complex input tensor to be center cropped. It should
have at least 3 dimensions and the cropping is applied along dimensions
-3 and -2 and the last dimensions should have a size of 2.
shape (int, int): The output shape. The shape should be smaller than the
corresponding dimensions of data.
Returns:
torch.Tensor: The center cropped image
"""
assert 0 < shape[0] <= data.shape[-3]
assert 0 < shape[1] <= data.shape[-2]
w_from = (data.shape[-3] - shape[0]) // 2
h_from = (data.shape[-2] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
return data[..., w_from:w_to, h_from:h_to, :]
def center_crop_to_smallest(x, y):
"""
Apply a center crop on the larger image to the size of the smaller image.
"""
smallest_width = min(x.shape[-1], y.shape[-1])
smallest_height = min(x.shape[-2], y.shape[-2])
x = center_crop(x, (smallest_height, smallest_width))
y = center_crop(y, (smallest_height, smallest_width))
return x, y
def normalize(data, mean, stddev, eps=0.):
"""
Normalize the given tensor using:
(data - mean) / (stddev + eps)
Args:
data (torch.Tensor): Input data to be normalized
mean (float): Mean value
stddev (float): Standard deviation
eps (float): Added to stddev to prevent dividing by zero
Returns:
torch.Tensor: Normalized tensor
"""
return (data - mean) / (stddev + eps)
def normalize_instance(data, eps=0.):
"""
Normalize the given tensor using:
(data - mean) / (stddev + eps)
where mean and stddev are computed from the data itself.
Args:
data (torch.Tensor): Input data to be normalized
eps (float): Added to stddev to prevent dividing by zero
Returns:
torch.Tensor: Normalized tensor
"""
mean = data.mean()
std = data.std()
return normalize(data, mean, std, eps), mean, std
# Helper functions
def roll(x, shift, dim):
"""
Similar to np.roll but applies to PyTorch Tensors
"""
if isinstance(shift, (tuple, list)):
assert len(shift) == len(dim)
for s, d in zip(shift, dim):
x = roll(x, s, d)
return x
shift = shift % x.size(dim)
if shift == 0:
return x
left = x.narrow(dim, 0, x.size(dim) - shift)
right = x.narrow(dim, x.size(dim) - shift, shift)
return torch.cat((right, left), dim=dim)
def fftshift(x, dim=None):
"""
Similar to np.fft.fftshift but applies to PyTorch Tensors
"""
if dim is None:
dim = tuple(range(x.dim()))
shift = [dim // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = x.shape[dim] // 2
else:
shift = [x.shape[i] // 2 for i in dim]
return roll(x, shift, dim)
def ifftshift(x, dim=None):
"""
Similar to np.fft.ifftshift but applies to PyTorch Tensors
"""
if dim is None:
dim = tuple(range(x.dim()))
shift = [(dim + 1) // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = (x.shape[dim] + 1) // 2
else:
shift = [(x.shape[i] + 1) // 2 for i in dim]
return roll(x, shift, dim)
def fft(input, signal_ndim, normalized=False):
# This function is called from the fft2 function below
if signal_ndim < 1 or signal_ndim > 3:
print("Signal ndim out of range, was", signal_ndim, "but expected a value between 1 and 3, inclusive")
return
dims = (-1)
if signal_ndim == 2:
dims = (-2, -1)
if signal_ndim == 3:
dims = (-3, -2, -1)
norm = "backward"
if normalized:
norm = "ortho"
return torch.view_as_real(torch.fft.fftn(torch.view_as_complex(input), dim=dims, norm=norm))
def ifft(input, signal_ndim, normalized=False):
# This function is called from the ifft2 function below
if signal_ndim < 1 or signal_ndim > 3:
print("Signal ndim out of range, was", signal_ndim, "but expected a value between 1 and 3, inclusive")
return
dims = (-1)
if signal_ndim == 2:
dims = (-2, -1)
if signal_ndim == 3:
dims = (-3, -2, -1)
norm = "backward"
if normalized:
norm = "ortho"
return torch.view_as_real(torch.fft.ifftn(torch.view_as_complex(input), dim=dims, norm=norm))
def fft2(data):
"""
ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
Apply centered 2 dimensional Fast Fourier Transform. It calls the fft function above to make it compatible with the latest version of pytorch.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The FFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = fft(data, 2, normalized=True)
data = fftshift(data, dim=(-3, -2))
return data
def ifft2(data):
"""
ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
Apply centered 2-dimensional Inverse Fast Fourier Transform. It calls the ifft function above to make it compatible with the latest version of pytorch.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The IFFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = ifft(data, 2, normalized=True)
data = fftshift(data, dim=(-3, -2))
return data
| 11,863 | 32.047354 | 155 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/varnet/functions/include/mri_helpers.py | import torch
import torch.nn as nn
import torchvision
import sys
import numpy as np
from PIL import Image
import PIL
import numpy as np
from torch.autograd import Variable
import random
import numpy as np
import torch
import matplotlib.pyplot as plt
from PIL import Image
import PIL
from torch.autograd import Variable
dtype = torch.cuda.FloatTensor
from . import transforms as transform
from .helpers import var_to_np,np_to_var
import numpy
import scipy.signal
import scipy.ndimage
def ksp2measurement(ksp):
return np_to_var( np.transpose( np.array([np.real(ksp),np.imag(ksp)]) , (1, 2, 3, 0)) )
def lsreconstruction(measurement,mode='both'):
# measurement has dimension (1, num_slices, x, y, 2)
fimg = transform.ifft2(measurement)
normimag = torch.norm(fimg[:,:,:,:,0])
normreal = torch.norm(fimg[:,:,:,:,1])
#print("real/img parts: ",normimag, normreal)
if mode == 'both':
return torch.sqrt(fimg[:,:,:,:,0]**2 + fimg[:,:,:,:,1]**2)
elif mode == 'real':
return torch.tensor(fimg[:,:,:,:,0]) #torch.sqrt(fimg[:,:,:,:,0]**2)
elif mode == 'imag':
return torch.sqrt(fimg[:,:,:,:,1]**2)
def root_sum_of_squares2(lsimg):
out = np.zeros(lsimg[0].shape)
for img in lsimg:
out += img**2
return np.sqrt(out)
def crop_center2(img,cropx,cropy):
y,x = img.shape
startx = x//2-(cropx//2)
starty = y//2-(cropy//2)
return img[starty:starty+cropy,startx:startx+cropx]
def channels2imgs(out):
sh = out.shape
chs = int(sh[0]/2)
imgs = np.zeros( (chs,sh[1],sh[2]) )
for i in range(chs):
imgs[i] = np.sqrt( out[2*i]**2 + out[2*i+1]**2 )
return imgs
def forwardm(img,mask):
# img has dimension (2*num_slices, x,y)
# output has dimension (1, num_slices, x, y, 2)
mask = np_to_var(mask)[0].type(dtype)
s = img.shape
ns = int(s[1]/2) # number of slices
fimg = Variable( torch.zeros( (s[0],ns,s[2],s[3],2 ) ) ).type(dtype)
for i in range(ns):
fimg[0,i,:,:,0] = img[0,2*i,:,:]
fimg[0,i,:,:,1] = img[0,2*i+1,:,:]
Fimg = transform.fft2(fimg) # dim: (1,num_slices,x,y,2)
for i in range(ns):
Fimg[0,i,:,:,0] *= mask
Fimg[0,i,:,:,1] *= mask
return Fimg
def get_scale_factor(net,num_channels,in_size,slice_ksp,scale_out=1,scale_type="norm"):
### get norm of deep decoder output
# get net input, scaling of that is irrelevant
shape = [1,num_channels, in_size[0], in_size[1]]
ni = Variable(torch.zeros(shape)).type(dtype)
ni.data.uniform_()
# generate random image
try:
out_chs = net( ni.type(dtype),scale_out=scale_out ).data.cpu().numpy()[0]
except:
out_chs = net( ni.type(dtype) ).data.cpu().numpy()[0]
out_imgs = channels2imgs(out_chs)
out_img_tt = transform.root_sum_of_squares( torch.tensor(out_imgs) , dim=0)
### get norm of least-squares reconstruction
ksp_tt = transform.to_tensor(slice_ksp)
orig_tt = transform.ifft2(ksp_tt) # Apply Inverse Fourier Transform to get the complex image
orig_imgs_tt = transform.complex_abs(orig_tt) # Compute absolute value to get a real image
orig_img_tt = transform.root_sum_of_squares(orig_imgs_tt, dim=0)
orig_img_np = orig_img_tt.cpu().numpy()
if scale_type == "norm":
s = np.linalg.norm(out_img_tt) / np.linalg.norm(orig_img_np)
if scale_type == "mean":
s = (out_img_tt.mean() / orig_img_np.mean()).numpy()[np.newaxis][0]
return s,ni
def data_consistency(parnet, parni, mask1d, slice_ksp_torchtensor1):
img = parnet(parni.type(dtype))
s = img.shape
ns = int(s[1]/2) # number of slices
fimg = Variable( torch.zeros( (s[0],ns,s[2],s[3],2 ) ) ).type(dtype)
for i in range(ns):
fimg[0,i,:,:,0] = img[0,2*i,:,:]
fimg[0,i,:,:,1] = img[0,2*i+1,:,:]
Fimg = transform.fft2(fimg) # dim: (1,num_slices,x,y,2)
# ksp has dim: (num_slices,x,y)
meas = slice_ksp_torchtensor1.unsqueeze(0) # dim: (1,num_slices,x,y,2)
mask = torch.from_numpy(np.array(mask1d, dtype=np.uint8))
ksp_dc = Fimg.clone()
ksp_dc = ksp_dc.detach().cpu()
ksp_dc[:,:,:,mask==1,:] = meas[:,:,:,mask==1,:] # after data consistency block
img_dc = transform.ifft2(ksp_dc)[0]
out = []
for img in img_dc.detach().cpu():
out += [ img[:,:,0].numpy() , img[:,:,1].numpy() ]
par_out_chs = np.array(out)
par_out_imgs = channels2imgs(par_out_chs)
# deep decoder reconstruction
prec = root_sum_of_squares2(par_out_imgs)
if prec.shape[0] > 320:
prec = crop_center2(prec,320,320)
return prec | 4,616 | 32.215827 | 106 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/varnet/functions/include/helpers.py | import torch
import torch.nn as nn
import torchvision
import sys
import numpy as np
from PIL import Image
import PIL
import numpy as np
from torch.autograd import Variable
import random
import numpy as np
import torch
import matplotlib.pyplot as plt
from PIL import Image
import PIL
from torch.autograd import Variable
def myimgshow(plt,img):
if(img.shape[0] == 1):
plt.imshow(np.clip(img[0],0,1),cmap='Greys',interpolation='none')
else:
plt.imshow(np.clip(img.transpose(1, 2, 0),0,1),interpolation='none')
def load_and_crop(imgname,target_width=512,target_height=512):
'''
imgname: string of image location
load an image, and center-crop if the image is large enough, else return none
'''
img = Image.open(imgname)
width, height = img.size
if width <= target_width or height <= target_height:
return None
left = (width - target_width)/2
top = (height - target_height)/2
right = (width + target_width)/2
bottom = (height + target_height)/2
return img.crop((left, top, right, bottom))
def save_np_img(img,filename):
if(img.shape[0] == 1):
plt.imshow(np.clip(img[0],0,1),cmap='Greys',interpolation='nearest')
else:
plt.imshow(np.clip(img.transpose(1, 2, 0),0,1))
plt.axis('off')
plt.savefig(filename, bbox_inches='tight')
plt.close()
def np_to_tensor(img_np):
'''Converts image in numpy.array to torch.Tensor.
From C x W x H [0..1] to C x W x H [0..1]
'''
return torch.from_numpy(img_np)
def np_to_var(img_np, dtype = torch.cuda.FloatTensor):
'''Converts image in numpy.array to torch.Variable.
From C x W x H [0..1] to 1 x C x W x H [0..1]
'''
return Variable(np_to_tensor(img_np)[None, :])
def var_to_np(img_var):
'''Converts an image in torch.Variable format to np.array.
From 1 x C x W x H [0..1] to C x W x H [0..1]
'''
return img_var.data.cpu().numpy()[0]
def pil_to_np(img_PIL):
'''Converts image in PIL format to np.array.
From W x H x C [0...255] to C x W x H [0..1]
'''
ar = np.array(img_PIL)
if len(ar.shape) == 3:
ar = ar.transpose(2,0,1)
else:
ar = ar[None, ...]
return ar.astype(np.float32) / 255.
def rgb2ycbcr(img):
#out = color.rgb2ycbcr( img.transpose(1, 2, 0) )
#return out.transpose(2,0,1)/256.
r,g,b = img[0],img[1],img[2]
y = 0.299*r+0.587*g+0.114*b
cb = 0.5 - 0.168736*r - 0.331264*g + 0.5*b
cr = 0.5 + 0.5*r - 0.418588*g - 0.081312*b
return np.array([y,cb,cr])
def ycbcr2rgb(img):
#out = color.ycbcr2rgb( 256.*img.transpose(1, 2, 0) )
#return (out.transpose(2,0,1) - np.min(out))/(np.max(out)-np.min(out))
y,cb,cr = img[0],img[1],img[2]
r = y + 1.402*(cr-0.5)
g = y - 0.344136*(cb-0.5) - 0.714136*(cr-0.5)
b = y + 1.772*(cb - 0.5)
return np.array([r,g,b])
def mse(x_hat,x_true,maxv=1.):
x_hat = x_hat.flatten()
x_true = x_true.flatten()
mse = np.mean(np.square(x_hat-x_true))
energy = np.mean(np.square(x_true))
return mse/energy
def psnr(x_hat,x_true,maxv=1.):
x_hat = x_hat.flatten()
x_true = x_true.flatten()
mse=np.mean(np.square(x_hat-x_true))
psnr_ = 10.*np.log(maxv**2/mse)/np.log(10.)
return psnr_
def num_param(net):
s = sum([np.prod(list(p.size())) for p in net.parameters()]);
return s
#print('Number of params: %d' % s)
def rgb2gray(rgb):
r, g, b = rgb[0,:,:], rgb[1,:,:], rgb[2,:,:]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return np.array([gray])
def savemtx_for_logplot(A,filename = "exp.dat"):
ind = sorted(list(set([int(i) for i in np.geomspace(1, len(A[0])-1 ,num=700)])))
A = [ [a[i] for i in ind] for a in A]
X = np.array([ind] + A)
np.savetxt(filename, X.T, delimiter=' ')
def get_imgnet_imgs(num_samples = 100, path = '../imagenet/',verbose=False):
perm = [i for i in range(1,50000)]
random.Random(4).shuffle(perm)
siz = 512
file = open("exp_imgnet_imgs.txt","w")
imgs = []
sampled = 0
imgslist = []
for imgnr in perm:
# prepare and select image
# Format is: ILSVRC2012_val_00024995.JPEG
imgnr_str = str(imgnr).zfill(8)
imgname = path + 'ILSVRC2012_val_' + imgnr_str + ".JPEG"
img = load_and_crop(imgname,target_width=512,target_height=512)
if img is None: # then the image could not be croped to 512x512
continue
img_np = pil_to_np(img)
if img_np.shape[0] != 3: # we only want to consider color images
continue
if verbose:
imgslist += ['ILSVRC2012_val_' + imgnr_str + ".JPEG"]
print("cp ", imgname, "./imgs")
imgs += [img_np]
sampled += 1
if sampled >= num_samples:
break
if verbose:
print(imgslist)
return imgs
| 4,860 | 26.308989 | 84 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/varnet/functions/include/transforms.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch
def to_tensor(data):
"""
Convert numpy array to PyTorch tensor. For complex arrays, the real and imaginary parts
are stacked along the last dimension.
Args:
data (np.array): Input numpy array
Returns:
torch.Tensor: PyTorch version of data
"""
if np.iscomplexobj(data):
data = np.stack((data.real, data.imag), axis=-1)
return torch.from_numpy(data)
def apply_mask(data, mask_func = None, mask = None, seed=None):
"""
Subsample given k-space by multiplying with a mask.
Args:
data (torch.Tensor): The input k-space data. This should have at least 3 dimensions, where
dimensions -3 and -2 are the spatial dimensions, and the final dimension has size
2 (for complex values).
mask_func (callable): A function that takes a shape (tuple of ints) and a random
number seed and returns a mask.
seed (int or 1-d array_like, optional): Seed for the random number generator.
Returns:
(tuple): tuple containing:
masked data (torch.Tensor): Subsampled k-space data
mask (torch.Tensor): The generated mask
"""
shape = np.array(data.shape)
shape[:-3] = 1
if mask is None:
mask = mask_func(shape, seed)
return data * mask, mask
def mask_center(x, mask_from, mask_to):
b, c, h, w, two = x.shape
mask = torch.zeros_like(x)
mask[:, :, :, mask_from:mask_to] = x[:, :, :, mask_from:mask_to]
return mask
def complex_mul(x, y):
assert x.shape[-1] == y.shape[-1] == 2
re = x[..., 0] * y[..., 0] - x[..., 1] * y[..., 1]
im = x[..., 0] * y[..., 1] + x[..., 1] * y[..., 0]
return torch.stack((re, im), dim=-1)
def complex_conj(x):
assert x.shape[-1] == 2
return torch.stack((x[..., 0], -x[..., 1]), dim=-1)
'''
def fft2(data):
"""
Apply centered 2 dimensional Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The FFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = torch.fft(data, 2, normalized=True)
data = fftshift(data, dim=(-3, -2))
return data
def ifft2(data):
"""
Apply centered 2-dimensional Inverse Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The IFFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = torch.ifft(data, 2, normalized=True)
data = fftshift(data, dim=(-3, -2))
return data
'''
def complex_abs(data):
"""
Compute the absolute value of a complex valued input tensor.
Args:
data (torch.Tensor): A complex valued tensor, where the size of the final dimension
should be 2.
Returns:
torch.Tensor: Absolute value of data
"""
assert data.size(-1) == 2
return (data ** 2).sum(dim=-1).sqrt()
def complex_abs_np(data): ############################################### MZD
"""
Compute the absolute value of a complex valued input tensor.
Args:
data (torch.Tensor): A complex valued tensor, where the size of the final dimension
should be 2.
Returns:
torch.Tensor: Absolute value of data
"""
assert data.shape[-1] == 2
return np.sqrt( (data ** 2).sum(axis=-1) )
def complex_abs_sq(data):
"""
Compute the squared absolute value of a complex tensor
"""
assert data.size(-1) == 2
return (data ** 2).sum(dim=-1)
def root_sum_of_squares(data, dim=0):
"""
Compute the Root Sum of Squares (RSS) transform along a given dimension of a tensor.
Args:
data (torch.Tensor): The input tensor
dim (int): The dimensions along which to apply the RSS transform
Returns:
torch.Tensor: The RSS value
"""
return torch.sqrt((data ** 2).sum(dim))
def root_sum_of_squares_complex(data, dim=0):
"""
Compute the Root Sum of Squares (RSS) transform along a given dimension of a tensor.
Args:
data (torch.Tensor): The input tensor
dim (int): The dimensions along which to apply the RSS transform
Returns:
torch.Tensor: The RSS value
"""
return torch.sqrt(complex_abs_sq(data).sum(dim))
def center_crop(data, shape):
"""
Apply a center crop to the input real image or batch of real images.
Args:
data (torch.Tensor): The input tensor to be center cropped. It should have at
least 2 dimensions and the cropping is applied along the last two dimensions.
shape (int, int): The output shape. The shape should be smaller than the
corresponding dimensions of data.
Returns:
torch.Tensor: The center cropped image
"""
assert 0 < shape[0] <= data.shape[-2]
assert 0 < shape[1] <= data.shape[-1]
w_from = (data.shape[-2] - shape[0]) // 2
h_from = (data.shape[-1] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
return data[..., w_from:w_to, h_from:h_to]
def complex_center_crop(data, shape):
"""
Apply a center crop to the input image or batch of complex images.
Args:
data (torch.Tensor): The complex input tensor to be center cropped. It should
have at least 3 dimensions and the cropping is applied along dimensions
-3 and -2 and the last dimensions should have a size of 2.
shape (int, int): The output shape. The shape should be smaller than the
corresponding dimensions of data.
Returns:
torch.Tensor: The center cropped image
"""
assert 0 < shape[0] <= data.shape[-3]
assert 0 < shape[1] <= data.shape[-2]
w_from = (data.shape[-3] - shape[0]) // 2
h_from = (data.shape[-2] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
return data[..., w_from:w_to, h_from:h_to, :]
def center_crop_to_smallest(x, y):
"""
Apply a center crop on the larger image to the size of the smaller image.
"""
smallest_width = min(x.shape[-1], y.shape[-1])
smallest_height = min(x.shape[-2], y.shape[-2])
x = center_crop(x, (smallest_height, smallest_width))
y = center_crop(y, (smallest_height, smallest_width))
return x, y
def normalize(data, mean, stddev, eps=0.):
"""
Normalize the given tensor using:
(data - mean) / (stddev + eps)
Args:
data (torch.Tensor): Input data to be normalized
mean (float): Mean value
stddev (float): Standard deviation
eps (float): Added to stddev to prevent dividing by zero
Returns:
torch.Tensor: Normalized tensor
"""
return (data - mean) / (stddev + eps)
def normalize_instance(data, eps=0.):
"""
Normalize the given tensor using:
(data - mean) / (stddev + eps)
where mean and stddev are computed from the data itself.
Args:
data (torch.Tensor): Input data to be normalized
eps (float): Added to stddev to prevent dividing by zero
Returns:
torch.Tensor: Normalized tensor
"""
mean = data.mean()
std = data.std()
return normalize(data, mean, std, eps), mean, std
# Helper functions
def roll(x, shift, dim):
"""
Similar to np.roll but applies to PyTorch Tensors
"""
if isinstance(shift, (tuple, list)):
assert len(shift) == len(dim)
for s, d in zip(shift, dim):
x = roll(x, s, d)
return x
shift = shift % x.size(dim)
if shift == 0:
return x
left = x.narrow(dim, 0, x.size(dim) - shift)
right = x.narrow(dim, x.size(dim) - shift, shift)
return torch.cat((right, left), dim=dim)
def fftshift(x, dim=None):
"""
Similar to np.fft.fftshift but applies to PyTorch Tensors
"""
if dim is None:
dim = tuple(range(x.dim()))
shift = [dim // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = x.shape[dim] // 2
else:
shift = [x.shape[i] // 2 for i in dim]
return roll(x, shift, dim)
def ifftshift(x, dim=None):
"""
Similar to np.fft.ifftshift but applies to PyTorch Tensors
"""
if dim is None:
dim = tuple(range(x.dim()))
shift = [(dim + 1) // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = (x.shape[dim] + 1) // 2
else:
shift = [(x.shape[i] + 1) // 2 for i in dim]
return roll(x, shift, dim)
def fft(input, signal_ndim, normalized=False):
# This function is called from the fft2 function below
if signal_ndim < 1 or signal_ndim > 3:
print("Signal ndim out of range, was", signal_ndim, "but expected a value between 1 and 3, inclusive")
return
dims = (-1)
if signal_ndim == 2:
dims = (-2, -1)
if signal_ndim == 3:
dims = (-3, -2, -1)
norm = "backward"
if normalized:
norm = "ortho"
return torch.view_as_real(torch.fft.fftn(torch.view_as_complex(input), dim=dims, norm=norm))
def ifft(input, signal_ndim, normalized=False):
# This function is called from the ifft2 function below
if signal_ndim < 1 or signal_ndim > 3:
print("Signal ndim out of range, was", signal_ndim, "but expected a value between 1 and 3, inclusive")
return
dims = (-1)
if signal_ndim == 2:
dims = (-2, -1)
if signal_ndim == 3:
dims = (-3, -2, -1)
norm = "backward"
if normalized:
norm = "ortho"
return torch.view_as_real(torch.fft.ifftn(torch.view_as_complex(input), dim=dims, norm=norm))
def fft2(data):
"""
ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
Apply centered 2 dimensional Fast Fourier Transform. It calls the fft function above to make it compatible with the latest version of pytorch.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The FFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = fft(data, 2, normalized=True)
data = fftshift(data, dim=(-3, -2))
return data
def ifft2(data):
"""
ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
Apply centered 2-dimensional Inverse Fast Fourier Transform. It calls the ifft function above to make it compatible with the latest version of pytorch.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The IFFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = ifft(data, 2, normalized=True)
data = fftshift(data, dim=(-3, -2))
return data
| 11,673 | 31.70028 | 155 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/varnet/functions/include/pytorch_ssim/__init__.py | import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
return gauss/gauss.sum()
def create_window(window_size, channel):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
return window
def _ssim(img1, img2, window, window_size, channel, size_average = True):
mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1*mu2
sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
class SSIM(torch.nn.Module):
def __init__(self, window_size = 11, size_average = True):
super(SSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.channel = 1
self.window = create_window(window_size, self.channel)
def forward(self, img1, img2):
(_, channel, _, _) = img1.size()
if channel == self.channel and self.window.data.type() == img1.data.type():
window = self.window
else:
window = create_window(self.window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
self.window = window
self.channel = channel
return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
"""def ssim(img1, img2, window_size = 11, size_average = True):
(_, channel, _, _) = img1.size()
window = create_window(window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
return _ssim(img1, img2, window, window_size, channel, size_average)"""
| 2,641 | 34.702703 | 104 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/unet/functions/mri_model.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from collections import defaultdict
import numpy as np
import pytorch_lightning as pl
import torch
import torchvision
from torch.utils.data import DistributedSampler, DataLoader
from .common import evaluate
from .common.utils import save_reconstructions
from .data.mri_data import SliceData
from .data import transforms
class MRIModel(pl.LightningModule):
"""
Abstract super class for Deep Learning based reconstruction models.
This is a subclass of the LightningModule class from pytorch_lightning, with
some additional functionality specific to fastMRI:
- fastMRI data loaders
- Evaluating reconstructions
- Visualization
- Saving test reconstructions
To implement a new reconstruction model, inherit from this class and implement the
following methods:
- train_data_transform, val_data_transform, test_data_transform:
Create and return data transformer objects for each data split
- training_step, validation_step, test_step:
Define what happens in one step of training, validation and testing respectively
- configure_optimizers:
Create and return the optimizers
Other methods from LightningModule can be overridden as needed.
"""
def __init__(self, hparams):
super().__init__()
self.hparams = hparams
def _create_data_loader(self, data_transform, data_partition, sample_rate=None):
sample_rate = sample_rate or self.hparams.sample_rate
dataset = SliceData(
root=self.hparams.data_path / f'{self.hparams.challenge}_{data_partition}',
transform=data_transform,
sample_rate=sample_rate,
challenge=self.hparams.challenge
)
sampler = DistributedSampler(dataset)
return DataLoader(
dataset=dataset,
batch_size=self.hparams.batch_size,
num_workers=0,
pin_memory=True,
sampler=sampler,
)
def train_data_transform(self):
raise NotImplementedError
#@pl.data_loader
def train_dataloader(self):
return self._create_data_loader(self.train_data_transform(), data_partition='train')
def val_data_transform(self):
raise NotImplementedError
#@pl.data_loader
def val_dataloader(self):
return self._create_data_loader(self.val_data_transform(), data_partition='val')
def test_data_transform(self):
raise NotImplementedError
#@pl.data_loader
def test_dataloader(self):
return self._create_data_loader(self.test_data_transform(), data_partition='test', sample_rate=1.)
def _evaluate(self, val_logs):
losses = []
outputs = defaultdict(list)
targets = defaultdict(list)
for log in val_logs:
losses.append(log['val_loss'].cpu().numpy())
for i, (fname, slice) in enumerate(zip(log['fname'], log['slice'])):
outputs[fname].append((slice, log['output'][i]))
targets[fname].append((slice, log['target'][i]))
metrics = dict(val_loss=losses, nmse=[], ssim=[], psnr=[])
for fname in outputs:
output = np.stack([transforms.complex_abs_np(np.moveaxis(out,0,2)) for _, out in sorted(outputs[fname])]) ## MZD
target = np.stack([transforms.complex_abs_np(np.moveaxis(tgt,0,2)) for _, tgt in sorted(targets[fname])]) ## MZD
#print(target.shape,output.shape)
metrics['nmse'].append(evaluate.nmse(target, output))
metrics['ssim'].append(evaluate.ssim(target, output))
metrics['psnr'].append(evaluate.psnr(target, output))
metrics = {metric: np.mean(values) for metric, values in metrics.items()}
print(metrics, '\n')
return dict(log=metrics, **metrics)
def _visualize(self, val_logs):
def _normalize(image):
image = image[np.newaxis]
image -= image.min()
return image / image.max()
def _save_image(image, tag):
grid = torchvision.utils.make_grid(torch.Tensor(image), nrow=4, pad_value=1)
self.logger.experiment.add_image(tag, grid)
# Only process first size to simplify visualization.
visualize_size = val_logs[0]['output'].shape
val_logs = [x for x in val_logs if x['output'].shape == visualize_size]
num_logs = len(val_logs)
num_viz_images = 16
step = (num_logs + num_viz_images - 1) // num_viz_images
outputs, targets = [], []
for i in range(0, num_logs, step):
#print(val_logs[i]['output'][0].shape)
outputs.append(_normalize( transforms.complex_abs_np(np.moveaxis(val_logs[i]['output'][0],0,2)) )) ######### MZD
targets.append(_normalize( transforms.complex_abs_np(np.moveaxis(val_logs[i]['target'][0],0,2)) )) ######### MZD
outputs = np.stack(outputs)
targets = np.stack(targets)
#print(targets.shape,outputs.shape)
_save_image(targets, 'Target')
_save_image(outputs, 'Reconstruction')
_save_image(np.abs(targets - outputs), 'Error')
def validation_end(self, val_logs):
self._visualize(val_logs)
return self._evaluate(val_logs)
def test_end(self, test_logs):
outputs = defaultdict(list)
for log in test_logs:
for i, (fname, slice) in enumerate(zip(log['fname'], log['slice'])):
outputs[fname].append((slice, log['output'][i]))
for fname in outputs:
outputs[fname] = np.stack([out for _, out in sorted(outputs[fname])])
save_reconstructions(outputs, self.hparams.exp_dir / self.hparams.exp / 'reconstructions')
return dict()
| 5,918 | 39.265306 | 124 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/unet/functions/unet_model.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from torch.nn import functional as F
class ConvBlock(nn.Module):
"""
A Convolutional Block that consists of two convolution layers each followed by
instance normalization, LeakyReLU activation and dropout.
"""
def __init__(self, in_chans, out_chans, drop_prob):
"""
Args:
in_chans (int): Number of channels in the input.
out_chans (int): Number of channels in the output.
drop_prob (float): Dropout probability.
"""
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.drop_prob = drop_prob
self.layers = nn.Sequential(
nn.Conv2d(in_chans, out_chans, kernel_size=3, padding=1, bias=False),
nn.InstanceNorm2d(out_chans),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Dropout2d(drop_prob),
nn.Conv2d(out_chans, out_chans, kernel_size=3, padding=1, bias=False),
nn.InstanceNorm2d(out_chans),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Dropout2d(drop_prob)
)
def forward(self, input):
"""
Args:
input (torch.Tensor): Input tensor of shape [batch_size, self.in_chans, height, width]
Returns:
(torch.Tensor): Output tensor of shape [batch_size, self.out_chans, height, width]
"""
return self.layers(input)
def __repr__(self):
return f'ConvBlock(in_chans={self.in_chans}, out_chans={self.out_chans}, ' \
f'drop_prob={self.drop_prob})'
class TransposeConvBlock(nn.Module):
"""
A Transpose Convolutional Block that consists of one convolution transpose layers followed by
instance normalization and LeakyReLU activation.
"""
def __init__(self, in_chans, out_chans):
"""
Args:
in_chans (int): Number of channels in the input.
out_chans (int): Number of channels in the output.
"""
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.layers = nn.Sequential(
nn.ConvTranspose2d(in_chans, out_chans, kernel_size=2, stride=2, bias=False),
nn.InstanceNorm2d(out_chans),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
def forward(self, input):
"""
Args:
input (torch.Tensor): Input tensor of shape [batch_size, self.in_chans, height, width]
Returns:
(torch.Tensor): Output tensor of shape [batch_size, self.out_chans, height, width]
"""
return self.layers(input)
def __repr__(self):
return f'ConvBlock(in_chans={self.in_chans}, out_chans={self.out_chans})'
class UnetModel(nn.Module):
"""
PyTorch implementation of a U-Net model.
This is based on:
Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks
for biomedical image segmentation. In International Conference on Medical image
computing and computer-assisted intervention, pages 234–241. Springer, 2015.
"""
def __init__(self, in_chans, out_chans, chans, num_pool_layers, drop_prob):
"""
Args:
in_chans (int): Number of channels in the input to the U-Net model.
out_chans (int): Number of channels in the output to the U-Net model.
chans (int): Number of output channels of the first convolution layer.
num_pool_layers (int): Number of down-sampling and up-sampling layers.
drop_prob (float): Dropout probability.
"""
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.chans = chans
self.num_pool_layers = num_pool_layers
self.drop_prob = drop_prob
self.down_sample_layers = nn.ModuleList([ConvBlock(in_chans, chans, drop_prob)])
ch = chans
for i in range(num_pool_layers - 1):
self.down_sample_layers += [ConvBlock(ch, ch * 2, drop_prob)]
ch *= 2
self.conv = ConvBlock(ch, ch * 2, drop_prob)
self.up_conv = nn.ModuleList()
self.up_transpose_conv = nn.ModuleList()
for i in range(num_pool_layers - 1):
self.up_transpose_conv += [TransposeConvBlock(ch * 2, ch)]
self.up_conv += [ConvBlock(ch * 2, ch, drop_prob)]
ch //= 2
self.up_transpose_conv += [TransposeConvBlock(ch * 2, ch)]
self.up_conv += [
nn.Sequential(
ConvBlock(ch * 2, ch, drop_prob),
nn.Conv2d(ch, self.out_chans, kernel_size=1, stride=1),
)]
def forward(self, input):
"""
Args:
input (torch.Tensor): Input tensor of shape [batch_size, self.in_chans, height, width]
Returns:
(torch.Tensor): Output tensor of shape [batch_size, self.out_chans, height, width]
"""
stack = []
output = input
# Apply down-sampling layers
for i, layer in enumerate(self.down_sample_layers):
output = layer(output)
stack.append(output)
output = F.avg_pool2d(output, kernel_size=2, stride=2, padding=0)
output = self.conv(output)
#print(output.shape,input.shape)
# Apply up-sampling layers
for transpose_conv, conv in zip(self.up_transpose_conv, self.up_conv):
downsample_layer = stack.pop()
output = transpose_conv(output)
# Reflect pad on the right/botton if needed to handle odd input dimensions.
padding = [0, 0, 0, 0]
if output.shape[-1] != downsample_layer.shape[-1]:
padding[1] = 1 # Padding right
if output.shape[-2] != downsample_layer.shape[-2]:
padding[3] = 1 # Padding bottom
if sum(padding) != 0:
output = F.pad(output, padding, "reflect")
output = torch.cat([output, downsample_layer], dim=1)
output = conv(output)
return output
### For Demo
from .mri_model import MRIModel
class UnetMRIModelDemo(MRIModel):
def __init__(self, hparams):
super().__init__(hparams)
self.unet = UnetModel(
in_chans=hparams.in_chans, ############################################################## MZD
out_chans=hparams.in_chans, ############################################################## MZD
chans=hparams.num_chans,
num_pool_layers=hparams.num_pools,
drop_prob=hparams.drop_prob
)
def forward(self, input):
return self.unet(input) #(input.unsqueeze(1)).squeeze(1) ############## MZD
@staticmethod
def add_model_specific_args(parser):
parser.add_argument('--num-pools', type=int, default=4, help='Number of U-Net pooling layers')
parser.add_argument('--drop-prob', type=float, default=0.0, help='Dropout probability')
parser.add_argument('--num-chans', type=int, default=32, help='Number of U-Net channels')
parser.add_argument('--batch-size', default=16, type=int, help='Mini batch size')
parser.add_argument('--lr', type=float, default=0.001, help='Learning rate')
parser.add_argument('--lr-step-size', type=int, default=40,
help='Period of learning rate decay')
parser.add_argument('--lr-gamma', type=float, default=0.1,
help='Multiplicative factor of learning rate decay')
parser.add_argument('--weight-decay', type=float, default=0.,
help='Strength of weight decay regularization')
parser.add_argument('--mask_type',default='random')
parser.add_argument('--in-chans', type=int, default=2, help='Number of U-Net input (and output) channels')
return parser
| 8,124 | 36.790698 | 114 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/unet/functions/train_unet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import pathlib
import random
import numpy as np
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.logging import TestTubeLogger
from torch.nn import functional as F
from torch.optim import RMSprop
from .common.args import Args
from .common.subsample import create_mask_for_mask_type
from .data import transforms
from .mri_model import MRIModel
from .unet_model import UnetModel
#torch.backends.cudnn.enabled = True
#torch.backends.cudnn.benchmark = True
#torch.cuda.set_device(3)
import os
#os.environ['CUDA_VISIBLE_DEVICES']='3'
#_Trainer__set_random_port()
class DataTransform:
"""
Data Transformer for training U-Net models.
"""
def __init__(self, resolution, which_challenge, mask_func=None, use_seed=True):
"""
Args:
mask_func (common.subsample.MaskFunc): A function that can create a mask of
appropriate shape.
resolution (int): Resolution of the image.
which_challenge (str): Either "singlecoil" or "multicoil" denoting the dataset.
use_seed (bool): If true, this class computes a pseudo random number generator seed
from the filename. This ensures that the same mask is used for all the slices of
a given volume every time.
"""
if which_challenge not in ('singlecoil', 'multicoil'):
raise ValueError(f'Challenge should either be "singlecoil" or "multicoil"')
self.mask_func = mask_func
self.resolution = resolution
self.which_challenge = which_challenge
self.use_seed = use_seed
def __call__(self, kspace, target, attrs, fname, slice):
"""
Args:
kspace (numpy.array): Input k-space of shape (num_coils, rows, cols, 2) for multi-coil
data or (rows, cols, 2) for single coil data.
target (numpy.array): Target image
attrs (dict): Acquisition related information stored in the HDF5 object.
fname (str): File name
slice (int): Serial number of the slice.
Returns:
(tuple): tuple containing:
image (torch.Tensor): Zero-filled input image.
target (torch.Tensor): Target image converted to a torch Tensor.
mean (float): Mean value used for normalization.
std (float): Standard deviation value used for normalization.
"""
kspace = transforms.to_tensor(kspace)
# Apply mask
if self.mask_func:
seed = None if not self.use_seed else tuple(map(ord, fname))
masked_kspace, mask = transforms.apply_mask(kspace, self.mask_func, seed)
else:
masked_kspace = kspace
# Inverse Fourier Transform to get zero filled solution
image = transforms.ifft2(masked_kspace)
# Crop input image to given resolution if larger
smallest_width = min(self.resolution, image.shape[-2])
smallest_height = min(self.resolution, image.shape[-3])
if target is not None:
smallest_width = min(smallest_width, target.shape[-1])
smallest_height = min(smallest_height, target.shape[-2])
crop_size = (smallest_height, smallest_width)
######################################## NO CROP ################################################## MZD
'''
image = transforms.complex_center_crop(image, crop_size)
##############
temp = image.clone()
temp = torch.zeros([image.shape[0],self.resolution,self.resolution,image.shape[-1]])
width_diff = (self.resolution-image.shape[-2])//2
height_diff = (self.resolution-image.shape[-3])//2
ws = width_diff + int(image.shape[-2]%2)
we = temp.shape[-2]-width_diff
#print(ws,we,width_diff,image.shape)
hs = height_diff + int(image.shape[-3]%2)
he = temp.shape[-3]-height_diff
temp[:,hs:he,ws:we,:] = image
# Absolute value
image = transforms.complex_abs(temp) ############
'''
################################################################################################### MZD
# Apply Root-Sum-of-Squares if multicoil data
if self.which_challenge == 'multicoil':
image = transforms.root_sum_of_squares(image)
image = torch.moveaxis(image , 2 , 0) ############################# MZD
# Normalize input
image, mean, std = transforms.normalize_instance(image, eps=1e-11)
image = image.clamp(-6, 6)
#print(image.shape)
# Normalize target
if target is not None:
target = transforms.ifft2(kspace) ############################# MZD
target = torch.moveaxis( transforms.root_sum_of_squares(target) , 2 , 0) ############################# MZD
#print(target.shape)
#im = transform.complex_abs(kspace)
############################### NO CROP - TARGET IS IFFT2(KSPACE) ##################################### MZD
'''
target = transforms.to_tensor(target)
target = transforms.center_crop(target, crop_size)
#print(target.shape)
##############
temp = target.clone()
temp = torch.zeros([self.resolution,self.resolution])
width_diff = (self.resolution-target.shape[-1])//2
height_diff = (self.resolution-target.shape[-2])//2
ws = width_diff + int(target.shape[-1]%2)
we = temp.shape[-1]-width_diff
hs = height_diff + int(target.shape[-2]%2)
he = temp.shape[-2]-height_diff
temp[hs:he,ws:we] = target
###############
'''
##################################################################################### MZD
target = transforms.normalize(target, mean, std, eps=1e-11)
target = target.clamp(-6, 6)
else:
target = torch.Tensor([0])
return image, target, mean, std, fname, slice
class UnetMRIModel(MRIModel):
def __init__(self, hparams):
super().__init__(hparams)
self.unet = UnetModel(
in_chans=hparams.in_chans, ############################################################## MZD
out_chans=hparams.in_chans, ############################################################## MZD
chans=hparams.num_chans,
num_pool_layers=hparams.num_pools,
drop_prob=hparams.drop_prob
)
def forward(self, input):
return self.unet(input) #(input.unsqueeze(1)).squeeze(1) ############## MZD
def training_step(self, batch, batch_idx):
input, target, mean, std, _, _ = batch
#print(input.shape,target.shape)
output = self.forward(input)
loss = F.l1_loss(output, target)
logs = {'loss': loss.item()}
return dict(loss=loss, log=logs)
def validation_step(self, batch, batch_idx):
input, target, mean, std, fname, slice = batch
output = self.forward(input)
#print(output.shape)
mean = mean.unsqueeze(1).unsqueeze(2)
std = std.unsqueeze(1).unsqueeze(2)
return {
'fname': fname,
'slice': slice,
'output': (output * std + mean).cpu().numpy(),
'target': (target * std + mean).cpu().numpy(),
'val_loss': F.l1_loss(output, target),
}
def test_step(self, batch, batch_idx):
input, _, mean, std, fname, slice = batch
output = self.forward(input)
mean = mean.unsqueeze(1).unsqueeze(2)
std = std.unsqueeze(1).unsqueeze(2)
return {
'fname': fname,
'slice': slice,
'output': (output * std + mean).cpu().numpy(),
}
def configure_optimizers(self):
optim = RMSprop(self.parameters(), lr=self.hparams.lr, weight_decay=self.hparams.weight_decay)
scheduler = torch.optim.lr_scheduler.StepLR(optim, self.hparams.lr_step_size, self.hparams.lr_gamma)
return [optim], [scheduler]
def train_data_transform(self):
mask = create_mask_for_mask_type(self.hparams.mask_type, self.hparams.center_fractions,
self.hparams.accelerations)
return DataTransform(self.hparams.resolution, self.hparams.challenge, mask, use_seed=False)
def val_data_transform(self):
mask = create_mask_for_mask_type(self.hparams.mask_type, self.hparams.center_fractions,
self.hparams.accelerations)
return DataTransform(self.hparams.resolution, self.hparams.challenge, mask)
def test_data_transform(self):
return DataTransform(self.hparams.resolution, self.hparams.challenge)
@staticmethod
def add_model_specific_args(parser):
parser.add_argument('--num-pools', type=int, default=4, help='Number of U-Net pooling layers')
parser.add_argument('--drop-prob', type=float, default=0.0, help='Dropout probability')
parser.add_argument('--num-chans', type=int, default=32, help='Number of U-Net channels')
parser.add_argument('--batch-size', default=16, type=int, help='Mini batch size')
parser.add_argument('--lr', type=float, default=0.001, help='Learning rate')
parser.add_argument('--lr-step-size', type=int, default=40,
help='Period of learning rate decay')
parser.add_argument('--lr-gamma', type=float, default=0.1,
help='Multiplicative factor of learning rate decay')
parser.add_argument('--weight-decay', type=float, default=0.,
help='Strength of weight decay regularization')
parser.add_argument('--mask_type',default='random')
parser.add_argument('--in-chans', type=int, default=2, help='Number of U-Net input (and output) channels')
return parser
def create_trainer(args, logger):
return Trainer(
#num_nodes=1,
logger=logger,
default_save_path=args.exp_dir,
checkpoint_callback=True,
max_nb_epochs=args.num_epochs,
gpus=args.gpus,
distributed_backend='ddp',
check_val_every_n_epoch=1,
val_check_interval=1.,
early_stop_callback=False
)
def main(args):
if args.mode == 'train':
load_version = 0 if args.resume else None
logger = TestTubeLogger(save_dir=args.exp_dir, name=args.exp, version=load_version)
trainer = create_trainer(args, logger)
model = UnetMRIModel(args)
trainer.fit(model)
else: # args.mode == 'test'
assert args.checkpoint is not None
model = UnetMRIModel.load_from_checkpoint(str(args.checkpoint))
model.hparams.sample_rate = 1.
trainer = create_trainer(args, logger=False)
trainer.test(model)
if __name__ == '__main__':
parser = Args()
parser.add_argument('--mode', choices=['train', 'test'], default='train')
parser.add_argument('--num-epochs', type=int, default=50, help='Number of training epochs')
parser.add_argument('--gpus', type=int, default=1)
parser.add_argument('--exp-dir', type=pathlib.Path, default='experiments',
help='Path where model and results should be saved')
parser.add_argument('--exp', type=str, help='Name of the experiment')
parser.add_argument('--checkpoint', type=pathlib.Path,
help='Path to pre-trained model. Use with --mode test')
parser.add_argument('--resume', action='store_true',
help='If set, resume the training from a previous model checkpoint. ')
parser = UnetMRIModel.add_model_specific_args(parser)
args = parser.parse_args()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
main(args)
| 12,100 | 41.609155 | 119 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/unet/functions/helpers.py | import torch
import numpy as np
from torch.autograd import Variable
dtype = torch.cuda.FloatTensor
class MaskFunc:
"""
ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
MaskFunc creates a sub-sampling mask of a given shape.
The mask selects a subset of columns from the input k-space data. If the k-space data has N
columns, the mask picks out:
1. N_low_freqs = (N * center_fraction) columns in the center corresponding to
low-frequencies
2. The other columns are selected uniformly at random with a probability equal to:
prob = (N / acceleration - N_low_freqs) / (N - N_low_freqs).
This ensures that the expected number of columns selected is equal to (N / acceleration)
"""
def __init__(self, center_fractions, accelerations):
"""
Args:
center_fractions (List[float]): Fraction of low-frequency columns to be retained.
If multiple values are provided, then one of these numbers is chosen uniformly
each time.
accelerations (List[int]): Amount of under-sampling. This should have the same length
as center_fractions. If multiple values are provided, then one of these is chosen
uniformly each time. An acceleration of 4 retains 25% of the columns, but they may
not be spaced evenly.
"""
if len(center_fractions) != len(accelerations):
raise ValueError('Number of center fractions should match number of accelerations')
self.center_fractions = center_fractions
self.accelerations = accelerations
self.rng = np.random.RandomState()
def __call__(self, shape, seed=None):
"""
Args:
shape (iterable[int]): The shape of the mask to be created. The shape should have
at least 3 dimensions. Samples are drawn along the second last dimension.
seed (int, optional): Seed for the random number generator. Setting the seed
ensures the same mask is generated each time for the same shape.
Returns:
torch.Tensor: A mask of the specified shape.
"""
if len(shape) < 3:
raise ValueError('Shape should have 3 or more dimensions')
self.rng.seed(seed)
num_cols = shape[-2]
choice = self.rng.randint(0, len(self.accelerations))
center_fraction = self.center_fractions[choice]
acceleration = self.accelerations[choice]
# Create the mask
num_low_freqs = int(round(num_cols * center_fraction))
prob = (num_cols / acceleration - num_low_freqs) / (num_cols - num_low_freqs)
mask = self.rng.uniform(size=num_cols) < prob
pad = (num_cols - num_low_freqs + 1) // 2
mask[pad:pad + num_low_freqs] = True
# Reshape the mask
mask_shape = [1 for _ in shape]
mask_shape[-2] = num_cols
mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))
return mask
def np_to_var(img_np, dtype = torch.cuda.FloatTensor):
'''
ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
Converts image in numpy.array to torch.Variable.
From C x W x H [0..1] to 1 x C x W x H [0..1]
'''
return Variable(torch.from_numpy(img_np)[None, :])
def var_to_np(img_var):
'''
ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
Converts an image in torch.Variable format to np.array.
From 1 x C x W x H [0..1] to C x W x H [0..1]
'''
return img_var.data.cpu().numpy()[0]
def ksp2measurement(ksp):
return np_to_var( np.transpose( np.array([np.real(ksp),np.imag(ksp)]) , (1, 2, 3, 0)) )
def root_sum_of_squares(data, dim=0):
"""
Compute the Root Sum of Squares (RSS) transform along a given dimension of a tensor.
Args:
data (torch.Tensor): The input tensor
dim (int): The dimensions along which to apply the RSS transform
Returns:
torch.Tensor: The RSS value
"""
return torch.sqrt((data ** 2).sum(dim))
def rss_torch(im):
'''
Apply the root sum of squares algorithm to coil images
'''
return torch.sqrt(torch.sum(torch.abs(im) ** 2, 0))
def crop_center(img,cropx,cropy):
y,x = img.shape
startx = x//2-(cropx//2)
starty = y//2-(cropy//2)
return img[starty:starty+cropy,startx:startx+cropx]
def my_crop(data,shape):
"""
Apply a center crop to the input real image or batch of real images.
Args:
data (torch.Tensor): The input tensor to be center cropped. It should have at
least 2 dimensions and the cropping is applied along the last two dimensions.
shape (int, int): The output shape. The shape should be smaller than the
corresponding dimensions of data.
Returns:
torch.Tensor: The center cropped image
"""
assert 0 < shape[0] <= data.shape[-3]
assert 0 < shape[1] <= data.shape[-2]
w_from = (data.shape[-3] - shape[0]) // 2
h_from = (data.shape[-2] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
return data[w_from:w_to, h_from:h_to,...]
def channels2imgs(out):
sh = out.shape
chs = int(sh[0]/2)
imgs = np.zeros( (chs,sh[1],sh[2]) )
for i in range(chs):
imgs[i] = np.sqrt( out[2*i]**2 + out[2*i+1]**2 )
return imgs
def forwardm(img,mask):
# img has dimension (2*num_slices, x,y)
# output has dimension (1, num_slices, x, y, 2)
mask = np_to_var(mask)[0].type(dtype)
s = img.shape
ns = int(s[1]/2) # number of slices
fimg = Variable( torch.zeros( (s[0],ns,s[2],s[3],2 ) ) ).type(dtype)
for i in range(ns):
fimg[0,i,:,:,0] = img[0,2*i,:,:]
fimg[0,i,:,:,1] = img[0,2*i+1,:,:]
Fimg = fft2(fimg) # dim: (1,num_slices,x,y,2)
for i in range(ns):
Fimg[0,i,:,:,0] *= mask
Fimg[0,i,:,:,1] *= mask
return Fimg
def get_mask(slice_ksp_torchtensor, slice_ksp,factor=4,cent=0.07):
try: # if the file already has a mask
temp = np.array([1 if e else 0 for e in f["mask"]])
temp = temp[np.newaxis].T
temp = np.array([[temp]])
mask = to_tensor(temp).type(dtype).detach().cpu()
except: # if we need to create a mask
desired_factor = factor # desired under-sampling factor
undersampling_factor = 0
tolerance = 0.03
while undersampling_factor < desired_factor - tolerance or undersampling_factor > desired_factor + tolerance:
mask_func = MaskFunc(center_fractions=[cent], accelerations=[desired_factor]) # Create the mask function object
masked_kspace, mask = apply_mask(slice_ksp_torchtensor, mask_func=mask_func) # Apply the mask to k-space
mask1d = var_to_np(mask)[0,:,0]
undersampling_factor = len(mask1d) / sum(mask1d)
mask1d = var_to_np(mask)[0,:,0]
# The provided mask and data have last dim of 368, but the actual data is smaller.
# To prevent forcing the network to learn outside the data region, we force the mask to 0 there.
mask1d[:mask1d.shape[-1]//2-160] = 0
mask1d[mask1d.shape[-1]//2+160:] =0
mask2d = np.repeat(mask1d[None,:], slice_ksp.shape[1], axis=0).astype(int) # Turning 1D Mask into 2D that matches data dimensions
mask2d = np.pad(mask2d,((0,),((slice_ksp.shape[-1]-mask2d.shape[-1])//2,)),mode='constant') # Zero padding to make sure dimensions match up
mask = to_tensor( np.array( [[mask2d[0][np.newaxis].T]] ) ).type(dtype).detach().cpu()
return mask, mask1d, mask2d
def apply_mask(data, mask_func = None, mask = None, seed=None):
"""
ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
Subsample given k-space by multiplying with a mask.
Args:
data (torch.Tensor): The input k-space data. This should have at least 3 dimensions, where
dimensions -3 and -2 are the spatial dimensions, and the final dimension has size
2 (for complex values).
mask_func (callable): A function that takes a shape (tuple of ints) and a random
number seed and returns a mask.
seed (int or 1-d array_like, optional): Seed for the random number generator.
Returns:
(tuple): tuple containing:
masked data (torch.Tensor): Subsampled k-space data
mask (torch.Tensor): The generated mask
"""
shape = np.array(data.shape)
shape[:-3] = 1
if mask is None:
mask = mask_func(shape, seed)
return data * mask, mask
def fft(input, signal_ndim, normalized=False):
# This function is called from the fft2 function below
if signal_ndim < 1 or signal_ndim > 3:
print("Signal ndim out of range, was", signal_ndim, "but expected a value between 1 and 3, inclusive")
return
dims = (-1)
if signal_ndim == 2:
dims = (-2, -1)
if signal_ndim == 3:
dims = (-3, -2, -1)
norm = "backward"
if normalized:
norm = "ortho"
return torch.view_as_real(torch.fft.fftn(torch.view_as_complex(input), dim=dims, norm=norm))
def ifft(input, signal_ndim, normalized=False):
# This function is called from the ifft2 function below
if signal_ndim < 1 or signal_ndim > 3:
print("Signal ndim out of range, was", signal_ndim, "but expected a value between 1 and 3, inclusive")
return
dims = (-1)
if signal_ndim == 2:
dims = (-2, -1)
if signal_ndim == 3:
dims = (-3, -2, -1)
norm = "backward"
if normalized:
norm = "ortho"
return torch.view_as_real(torch.fft.ifftn(torch.view_as_complex(input), dim=dims, norm=norm))
def fft2(data):
"""
ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
Apply centered 2 dimensional Fast Fourier Transform. It calls the fft function above to make it compatible with the latest version of pytorch.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The FFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = fft(data, 2, normalized=True)
data = fftshift(data, dim=(-3, -2))
return data
def ifft2(data):
"""
ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
Apply centered 2-dimensional Inverse Fast Fourier Transform. It calls the ifft function above to make it compatible with the latest version of pytorch.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The IFFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = ifft(data, 2, normalized=True)
data = fftshift(data, dim=(-3, -2))
return data
def complex_abs(data):
"""
ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
Compute the absolute value of a complex valued input tensor.
Args:
data (torch.Tensor): A complex valued tensor, where the size of the final dimension
should be 2.
Returns:
torch.Tensor: Absolute value of data
"""
assert data.size(-1) == 2
return (data ** 2).sum(dim=-1).sqrt()
def fftshift(x, dim=None):
"""
ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
Similar to np.fft.fftshift but applies to PyTorch Tensors
"""
if dim is None:
dim = tuple(range(x.dim()))
shift = [dim // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = x.shape[dim] // 2
else:
shift = [x.shape[i] // 2 for i in dim]
return roll(x, shift, dim)
def ifftshift(x, dim=None):
"""
ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
Similar to np.fft.ifftshift but applies to PyTorch Tensors
"""
if dim is None:
dim = tuple(range(x.dim()))
shift = [(dim + 1) // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = (x.shape[dim] + 1) // 2
else:
shift = [(x.shape[i] + 1) // 2 for i in dim]
return roll(x, shift, dim)
def roll(x, shift, dim):
"""
ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
Similar to np.roll but applies to PyTorch Tensors
"""
if isinstance(shift, (tuple, list)):
assert len(shift) == len(dim)
for s, d in zip(shift, dim):
x = roll(x, s, d)
return x
shift = shift % x.size(dim)
if shift == 0:
return x
left = x.narrow(dim, 0, x.size(dim) - shift)
right = x.narrow(dim, x.size(dim) - shift, shift)
return torch.cat((right, left), dim=dim)
| 13,056 | 36.412607 | 155 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/unet/functions/common/utils.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import json
import h5py
def save_reconstructions(reconstructions, out_dir):
"""
Saves the reconstructions from a model into h5 files that is appropriate for submission
to the leaderboard.
Args:
reconstructions (dict[str, np.array]): A dictionary mapping input filenames to
corresponding reconstructions (of shape num_slices x height x width).
out_dir (pathlib.Path): Path to the output directory where the reconstructions
should be saved.
"""
out_dir.mkdir(exist_ok=True)
for fname, recons in reconstructions.items():
with h5py.File(out_dir / fname, 'w') as f:
f.create_dataset('reconstruction', data=recons)
def tensor_to_complex_np(data):
"""
Converts a complex torch tensor to numpy array.
Args:
data (torch.Tensor): Input data to be converted to numpy.
Returns:
np.array: Complex numpy version of data
"""
data = data.numpy()
return data[..., 0] + 1j * data[..., 1]
| 1,187 | 28.7 | 91 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/unet/functions/common/test_subsample.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import pytest
import torch
from common.subsample import MaskFunc
@pytest.mark.parametrize("center_fracs, accelerations, batch_size, dim", [
([0.2], [4], 4, 320),
([0.2, 0.4], [4, 8], 2, 368),
])
def test_mask_reuse(center_fracs, accelerations, batch_size, dim):
mask_func = MaskFunc(center_fracs, accelerations)
shape = (batch_size, dim, dim, 2)
mask1 = mask_func(shape, seed=123)
mask2 = mask_func(shape, seed=123)
mask3 = mask_func(shape, seed=123)
assert torch.all(mask1 == mask2)
assert torch.all(mask2 == mask3)
@pytest.mark.parametrize("center_fracs, accelerations, batch_size, dim", [
([0.2], [4], 4, 320),
([0.2, 0.4], [4, 8], 2, 368),
])
def test_mask_low_freqs(center_fracs, accelerations, batch_size, dim):
mask_func = MaskFunc(center_fracs, accelerations)
shape = (batch_size, dim, dim, 2)
mask = mask_func(shape, seed=123)
mask_shape = [1 for _ in shape]
mask_shape[-2] = dim
assert list(mask.shape) == mask_shape
num_low_freqs_matched = False
for center_frac in center_fracs:
num_low_freqs = int(round(dim * center_frac))
pad = (dim - num_low_freqs + 1) // 2
if np.all(mask[pad:pad + num_low_freqs].numpy() == 1):
num_low_freqs_matched = True
assert num_low_freqs_matched
| 1,506 | 30.395833 | 74 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/unet/functions/common/subsample.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch
def create_mask_for_mask_type(mask_type_str, center_fractions, accelerations):
if mask_type_str == 'random':
return RandomMaskFunc(center_fractions, accelerations)
elif mask_type_str == 'equispaced':
return EquispacedMaskFunc(center_fractions, accelerations)
else:
raise Exception(f"{mask_type_str} not supported")
class MaskFunc():
def __init__(self, center_fractions, accelerations):
"""
Args:
center_fractions (List[float]): Fraction of low-frequency columns to be retained.
If multiple values are provided, then one of these numbers is chosen uniformly
each time.
accelerations (List[int]): Amount of under-sampling. This should have the same length
as center_fractions. If multiple values are provided, then one of these is chosen
uniformly each time.
"""
if len(center_fractions) != len(accelerations):
raise ValueError('Number of center fractions should match number of accelerations')
self.center_fractions = center_fractions
self.accelerations = accelerations
self.rng = np.random.RandomState()
def choose_acceleration(self):
choice = self.rng.randint(0, len(self.accelerations))
center_fraction = self.center_fractions[choice]
acceleration = self.accelerations[choice]
return center_fraction, acceleration
class RandomMaskFunc(MaskFunc):
"""
RandomMaskFunc creates a sub-sampling mask of a given shape.
The mask selects a subset of columns from the input k-space data. If the k-space data has N
columns, the mask picks out:
1. N_low_freqs = (N * center_fraction) columns in the center corresponding to
low-frequencies
2. The other columns are selected uniformly at random with a probability equal to:
prob = (N / acceleration - N_low_freqs) / (N - N_low_freqs).
This ensures that the expected number of columns selected is equal to (N / acceleration)
It is possible to use multiple center_fractions and accelerations, in which case one possible
(center_fraction, acceleration) is chosen uniformly at random each time the RandomMaskFunc object is
called.
For example, if accelerations = [4, 8] and center_fractions = [0.08, 0.04], then there
is a 50% probability that 4-fold acceleration with 8% center fraction is selected and a 50%
probability that 8-fold acceleration with 4% center fraction is selected.
"""
def __init__(self, center_fractions, accelerations):
"""
Args:
center_fractions (List[float]): Fraction of low-frequency columns to be retained.
If multiple values are provided, then one of these numbers is chosen uniformly
each time.
accelerations (List[int]): Amount of under-sampling. This should have the same length
as center_fractions. If multiple values are provided, then one of these is chosen
uniformly each time. An acceleration of 4 retains 25% of the columns, but they may
not be spaced evenly.
"""
if len(center_fractions) != len(accelerations):
raise ValueError('Number of center fractions should match number of accelerations')
self.center_fractions = center_fractions
self.accelerations = accelerations
self.rng = np.random.RandomState()
def __call__(self, shape, seed=None):
"""
Args:
shape (iterable[int]): The shape of the mask to be created. The shape should have
at least 3 dimensions. Samples are drawn along the second last dimension.
seed (int, optional): Seed for the random number generator. Setting the seed
ensures the same mask is generated each time for the same shape.
Returns:
torch.Tensor: A mask of the specified shape.
"""
if len(shape) < 3:
raise ValueError('Shape should have 3 or more dimensions')
self.rng.seed(seed)
num_cols = shape[-2]
center_fraction, acceleration = self.choose_acceleration()
# Create the mask
num_low_freqs = int(round(num_cols * center_fraction))
prob = (num_cols / acceleration - num_low_freqs) / (num_cols - num_low_freqs)
mask = self.rng.uniform(size=num_cols) < prob
pad = (num_cols - num_low_freqs + 1) // 2
mask[pad:pad + num_low_freqs] = True
# Reshape the mask
mask_shape = [1 for _ in shape]
mask_shape[-2] = num_cols
mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))
return mask
class EquispacedMaskFunc(MaskFunc):
"""
EquispacedMaskFunc creates a sub-sampling mask of a given shape.
The mask selects a subset of columns from the input k-space data. If the k-space data has N
columns, the mask picks out:
1. N_low_freqs = (N * center_fraction) columns in the center corresponding to
low-frequencies
2. The other columns are selected with equal spacing at a proportion that reaches the
desired acceleration rate taking into consideration the number of low frequencies. This
ensures that the expected number of columns selected is equal to (N / acceleration)
It is possible to use multiple center_fractions and accelerations, in which case one possible
(center_fraction, acceleration) is chosen uniformly at random each time the EquispacedMaskFunc
object is called.
"""
def __call__(self, shape, seed):
"""
Args:
shape (iterable[int]): The shape of the mask to be created. The shape should have
at least 3 dimensions. Samples are drawn along the second last dimension.
seed (int, optional): Seed for the random number generator. Setting the seed
ensures the same mask is generated each time for the same shape.
Returns:
torch.Tensor: A mask of the specified shape.
"""
if len(shape) < 3:
raise ValueError('Shape should have 3 or more dimensions')
self.rng.seed(seed)
center_fraction, acceleration = self.choose_acceleration()
num_cols = shape[-2]
num_low_freqs = int(round(num_cols * center_fraction))
# Create the mask
mask = np.zeros(num_cols, dtype=np.float32)
pad = (num_cols - num_low_freqs + 1) // 2
mask[pad:pad + num_low_freqs] = True
# Determine acceleration rate by adjusting for the number of low frequencies
adjusted_accel = (acceleration * (num_low_freqs - num_cols)) / (num_low_freqs * acceleration - num_cols)
offset = self.rng.randint(0, round(adjusted_accel))
accel_samples = np.arange(offset, num_cols - 1, adjusted_accel)
accel_samples = np.around(accel_samples).astype(np.uint)
mask[accel_samples] = True
# Reshape the mask
mask_shape = [1 for _ in shape]
mask_shape[-2] = num_cols
mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))
return mask
| 7,423 | 42.415205 | 112 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/unet/functions/data/mri_data.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import pathlib
import random
import h5py
from torch.utils.data import Dataset
class SliceData(Dataset):
"""
A PyTorch Dataset that provides access to MR image slices.
"""
def __init__(self, root, transform, challenge, sample_rate=1):
"""
Args:
root (pathlib.Path): Path to the dataset.
transform (callable): A callable object that pre-processes the raw data into
appropriate form. The transform function should take 'kspace', 'target',
'attributes', 'filename', and 'slice' as inputs. 'target' may be null
for test data.
challenge (str): "singlecoil" or "multicoil" depending on which challenge to use.
sample_rate (float, optional): A float between 0 and 1. This controls what fraction
of the volumes should be loaded.
"""
if challenge not in ('singlecoil', 'multicoil'):
raise ValueError('challenge should be either "singlecoil" or "multicoil"')
self.transform = transform
self.recons_key = 'reconstruction_esc' if challenge == 'singlecoil' \
else 'reconstruction_rss'
self.examples = []
files = list(pathlib.Path(root).iterdir())
if sample_rate < 1:
random.shuffle(files)
num_files = round(len(files) * sample_rate)
files = files[:num_files]
for fname in sorted(files):
kspace = h5py.File(fname, 'r')['kspace']
num_slices = kspace.shape[0]
self.examples += [(fname, slice) for slice in range(num_slices)]
def __len__(self):
return len(self.examples)
def __getitem__(self, i):
fname, slice = self.examples[i]
with h5py.File(fname, 'r') as data:
kspace = data['kspace'][slice]
target = data[self.recons_key][slice] if self.recons_key in data else None
return self.transform(kspace, target, data.attrs, fname.name, slice)
| 2,181 | 35.983051 | 95 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/unet/functions/data/test_transforms.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import pytest
import torch
from common import utils
from common.subsample import RandomMaskFunc
from data import transforms
def create_input(shape):
input = np.arange(np.product(shape)).reshape(shape)
input = torch.from_numpy(input).float()
return input
@pytest.mark.parametrize('shape, center_fractions, accelerations', [
([4, 32, 32, 2], [0.08], [4]),
([2, 64, 64, 2], [0.04, 0.08], [8, 4]),
])
def test_apply_mask(shape, center_fractions, accelerations):
mask_func = RandomMaskFunc(center_fractions, accelerations)
expected_mask = mask_func(shape, seed=123)
input = create_input(shape)
output, mask = transforms.apply_mask(input, mask_func, seed=123)
assert output.shape == input.shape
assert mask.shape == expected_mask.shape
assert np.all(expected_mask.numpy() == mask.numpy())
assert np.all(np.where(mask.numpy() == 0, 0, output.numpy()) == output.numpy())
@pytest.mark.parametrize('shape', [
[3, 3],
[4, 6],
[10, 8, 4],
])
def test_fft2(shape):
shape = shape + [2]
input = create_input(shape)
out_torch = transforms.fft2(input).numpy()
out_torch = out_torch[..., 0] + 1j * out_torch[..., 1]
input_numpy = utils.tensor_to_complex_np(input)
input_numpy = np.fft.ifftshift(input_numpy, (-2, -1))
out_numpy = np.fft.fft2(input_numpy, norm='ortho')
out_numpy = np.fft.fftshift(out_numpy, (-2, -1))
assert np.allclose(out_torch, out_numpy)
@pytest.mark.parametrize('shape', [
[3, 3],
[4, 6],
[10, 8, 4],
])
def test_ifft2(shape):
shape = shape + [2]
input = create_input(shape)
out_torch = transforms.ifft2(input).numpy()
out_torch = out_torch[..., 0] + 1j * out_torch[..., 1]
input_numpy = utils.tensor_to_complex_np(input)
input_numpy = np.fft.ifftshift(input_numpy, (-2, -1))
out_numpy = np.fft.ifft2(input_numpy, norm='ortho')
out_numpy = np.fft.fftshift(out_numpy, (-2, -1))
assert np.allclose(out_torch, out_numpy)
@pytest.mark.parametrize('shape', [
[3, 3],
[4, 6],
[10, 8, 4],
])
def test_complex_abs(shape):
shape = shape + [2]
input = create_input(shape)
out_torch = transforms.complex_abs(input).numpy()
input_numpy = utils.tensor_to_complex_np(input)
out_numpy = np.abs(input_numpy)
assert np.allclose(out_torch, out_numpy)
@pytest.mark.parametrize('shape, dim', [
[[3, 3], 0],
[[4, 6], 1],
[[10, 8, 4], 2],
])
def test_root_sum_of_squares(shape, dim):
input = create_input(shape)
out_torch = transforms.root_sum_of_squares(input, dim).numpy()
out_numpy = np.sqrt(np.sum(input.numpy() ** 2, dim))
assert np.allclose(out_torch, out_numpy)
@pytest.mark.parametrize('shape, target_shape', [
[[10, 10], [4, 4]],
[[4, 6], [2, 4]],
[[8, 4], [4, 4]],
])
def test_center_crop(shape, target_shape):
input = create_input(shape)
out_torch = transforms.center_crop(input, target_shape).numpy()
assert list(out_torch.shape) == target_shape
@pytest.mark.parametrize('shape, target_shape', [
[[10, 10], [4, 4]],
[[4, 6], [2, 4]],
[[8, 4], [4, 4]],
])
def test_complex_center_crop(shape, target_shape):
shape = shape + [2]
input = create_input(shape)
out_torch = transforms.complex_center_crop(input, target_shape).numpy()
assert list(out_torch.shape) == target_shape + [2, ]
@pytest.mark.parametrize('shape, mean, stddev', [
[[10, 10], 0, 1],
[[4, 6], 4, 10],
[[8, 4], 2, 3],
])
def test_normalize(shape, mean, stddev):
input = create_input(shape)
output = transforms.normalize(input, mean, stddev).numpy()
assert np.isclose(output.mean(), (input.numpy().mean() - mean) / stddev)
assert np.isclose(output.std(), input.numpy().std() / stddev)
@pytest.mark.parametrize('shape', [
[10, 10],
[20, 40, 30],
])
def test_normalize_instance(shape):
input = create_input(shape)
output, mean, stddev = transforms.normalize_instance(input)
output = output.numpy()
assert np.isclose(input.numpy().mean(), mean, rtol=1e-2)
assert np.isclose(input.numpy().std(), stddev, rtol=1e-2)
assert np.isclose(output.mean(), 0, rtol=1e-2, atol=1e-3)
assert np.isclose(output.std(), 1, rtol=1e-2, atol=1e-3)
@pytest.mark.parametrize('shift, dim', [
(0, 0),
(1, 0),
(-1, 0),
(100, 0),
((1, 2), (1, 2)),
])
@pytest.mark.parametrize('shape', [
[5, 6, 2],
[3, 4, 5],
])
def test_roll(shift, dim, shape):
input = np.arange(np.product(shape)).reshape(shape)
out_torch = transforms.roll(torch.from_numpy(input), shift, dim).numpy()
out_numpy = np.roll(input, shift, dim)
assert np.allclose(out_torch, out_numpy)
@pytest.mark.parametrize('shape', [
[5, 3],
[2, 4, 6],
])
def test_fftshift(shape):
input = np.arange(np.product(shape)).reshape(shape)
out_torch = transforms.fftshift(torch.from_numpy(input)).numpy()
out_numpy = np.fft.fftshift(input)
assert np.allclose(out_torch, out_numpy)
@pytest.mark.parametrize('shape', [
[5, 3],
[2, 4, 5],
[2, 7, 5],
])
def test_ifftshift(shape):
input = np.arange(np.product(shape)).reshape(shape)
out_torch = transforms.ifftshift(torch.from_numpy(input)).numpy()
out_numpy = np.fft.ifftshift(input)
assert np.allclose(out_torch, out_numpy)
| 5,497 | 28.244681 | 83 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/unet/functions/data/transforms.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch
def to_tensor(data):
"""
Convert numpy array to PyTorch tensor. For complex arrays, the real and imaginary parts
are stacked along the last dimension.
Args:
data (np.array): Input numpy array
Returns:
torch.Tensor: PyTorch version of data
"""
if np.iscomplexobj(data):
data = np.stack((data.real, data.imag), axis=-1)
return torch.from_numpy(data)
def apply_mask(data, mask_func, seed=None, padding=None):
"""
Subsample given k-space by multiplying with a mask.
Args:
data (torch.Tensor): The input k-space data. This should have at least 3 dimensions, where
dimensions -3 and -2 are the spatial dimensions, and the final dimension has size
2 (for complex values).
mask_func (callable): A function that takes a shape (tuple of ints) and a random
number seed and returns a mask.
seed (int or 1-d array_like, optional): Seed for the random number generator.
Returns:
(tuple): tuple containing:
masked data (torch.Tensor): Subsampled k-space data
mask (torch.Tensor): The generated mask
"""
shape = np.array(data.shape)
shape[:-3] = 1
mask = mask_func(shape, seed)
if padding is not None:
mask[:, :, :padding[0]] = 0
mask[:, :, padding[1]:] = 0 # padding value inclusive on right of zeros
masked_data = data * mask + 0.0 # The + 0.0 removes the sign of the zeros
return masked_data, mask
def mask_center(x, mask_from, mask_to):
b, c, h, w, two = x.shape
mask = torch.zeros_like(x)
mask[:, :, :, mask_from:mask_to] = x[:, :, :, mask_from:mask_to]
return mask
def complex_mul(x, y):
assert x.shape[-1] == y.shape[-1] == 2
re = x[..., 0] * y[..., 0] - x[..., 1] * y[..., 1]
im = x[..., 0] * y[..., 1] + x[..., 1] * y[..., 0]
return torch.stack((re, im), dim=-1)
def complex_conj(x):
assert x.shape[-1] == 2
return torch.stack((x[..., 0], -x[..., 1]), dim=-1)
'''
def fft2(data):
"""
Apply centered 2 dimensional Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The FFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = torch.fft(data, 2, normalized=True)
data = fftshift(data, dim=(-3, -2))
return data
def ifft2(data):
"""
Apply centered 2-dimensional Inverse Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The IFFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = torch.ifft(data, 2, normalized=True)
data = fftshift(data, dim=(-3, -2))
return data
'''
def complex_abs(data):
"""
Compute the absolute value of a complex valued input tensor.
Args:
data (torch.Tensor): A complex valued tensor, where the size of the final dimension
should be 2.
Returns:
torch.Tensor: Absolute value of data
"""
assert data.size(-1) == 2
return (data ** 2).sum(dim=-1).sqrt()
def complex_abs_np(data): ############################################### MZD
"""
Compute the absolute value of a complex valued input tensor.
Args:
data (torch.Tensor): A complex valued tensor, where the size of the final dimension
should be 2.
Returns:
torch.Tensor: Absolute value of data
"""
assert data.shape[-1] == 2
return np.sqrt( (data ** 2).sum(axis=-1) )
def complex_abs_sq(data):
"""
Compute the squared absolute value of a complex tensor
"""
assert data.size(-1) == 2
return (data ** 2).sum(dim=-1)
def root_sum_of_squares(data, dim=0):
"""
Compute the Root Sum of Squares (RSS) transform along a given dimension of a tensor.
Args:
data (torch.Tensor): The input tensor
dim (int): The dimensions along which to apply the RSS transform
Returns:
torch.Tensor: The RSS value
"""
return torch.sqrt((data ** 2).sum(dim))
def root_sum_of_squares_complex(data, dim=0):
"""
Compute the Root Sum of Squares (RSS) transform along a given dimension of a tensor.
Args:
data (torch.Tensor): The input tensor
dim (int): The dimensions along which to apply the RSS transform
Returns:
torch.Tensor: The RSS value
"""
return torch.sqrt(complex_abs_sq(data).sum(dim))
def center_crop(data, shape):
"""
Apply a center crop to the input real image or batch of real images.
Args:
data (torch.Tensor): The input tensor to be center cropped. It should have at
least 2 dimensions and the cropping is applied along the last two dimensions.
shape (int, int): The output shape. The shape should be smaller than the
corresponding dimensions of data.
Returns:
torch.Tensor: The center cropped image
"""
assert 0 < shape[0] <= data.shape[-2]
assert 0 < shape[1] <= data.shape[-1]
w_from = (data.shape[-2] - shape[0]) // 2
h_from = (data.shape[-1] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
return data[..., w_from:w_to, h_from:h_to]
def complex_center_crop(data, shape):
"""
Apply a center crop to the input image or batch of complex images.
Args:
data (torch.Tensor): The complex input tensor to be center cropped. It should
have at least 3 dimensions and the cropping is applied along dimensions
-3 and -2 and the last dimensions should have a size of 2.
shape (int, int): The output shape. The shape should be smaller than the
corresponding dimensions of data.
Returns:
torch.Tensor: The center cropped image
"""
assert 0 < shape[0] <= data.shape[-3]
assert 0 < shape[1] <= data.shape[-2]
w_from = (data.shape[-3] - shape[0]) // 2
h_from = (data.shape[-2] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
return data[..., w_from:w_to, h_from:h_to, :]
def center_crop_to_smallest(x, y):
"""
Apply a center crop on the larger image to the size of the smaller image.
"""
smallest_width = min(x.shape[-1], y.shape[-1])
smallest_height = min(x.shape[-2], y.shape[-2])
x = center_crop(x, (smallest_height, smallest_width))
y = center_crop(y, (smallest_height, smallest_width))
return x, y
def normalize(data, mean, stddev, eps=0.):
"""
Normalize the given tensor using:
(data - mean) / (stddev + eps)
Args:
data (torch.Tensor): Input data to be normalized
mean (float): Mean value
stddev (float): Standard deviation
eps (float): Added to stddev to prevent dividing by zero
Returns:
torch.Tensor: Normalized tensor
"""
return (data - mean) / (stddev + eps)
def normalize_instance(data, eps=0.):
"""
Normalize the given tensor using:
(data - mean) / (stddev + eps)
where mean and stddev are computed from the data itself.
Args:
data (torch.Tensor): Input data to be normalized
eps (float): Added to stddev to prevent dividing by zero
Returns:
torch.Tensor: Normalized tensor
"""
mean = data.mean()
std = data.std()
return normalize(data, mean, std, eps), mean, std
# Helper functions
def roll(x, shift, dim):
"""
Similar to np.roll but applies to PyTorch Tensors
"""
if isinstance(shift, (tuple, list)):
assert len(shift) == len(dim)
for s, d in zip(shift, dim):
x = roll(x, s, d)
return x
shift = shift % x.size(dim)
if shift == 0:
return x
left = x.narrow(dim, 0, x.size(dim) - shift)
right = x.narrow(dim, x.size(dim) - shift, shift)
return torch.cat((right, left), dim=dim)
def fftshift(x, dim=None):
"""
Similar to np.fft.fftshift but applies to PyTorch Tensors
"""
if dim is None:
dim = tuple(range(x.dim()))
shift = [dim // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = x.shape[dim] // 2
else:
shift = [x.shape[i] // 2 for i in dim]
return roll(x, shift, dim)
def ifftshift(x, dim=None):
"""
Similar to np.fft.ifftshift but applies to PyTorch Tensors
"""
if dim is None:
dim = tuple(range(x.dim()))
shift = [(dim + 1) // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = (x.shape[dim] + 1) // 2
else:
shift = [(x.shape[i] + 1) // 2 for i in dim]
return roll(x, shift, dim)
def fft(input, signal_ndim, normalized=False):
# This function is called from the fft2 function below
if signal_ndim < 1 or signal_ndim > 3:
print("Signal ndim out of range, was", signal_ndim, "but expected a value between 1 and 3, inclusive")
return
dims = (-1)
if signal_ndim == 2:
dims = (-2, -1)
if signal_ndim == 3:
dims = (-3, -2, -1)
norm = "backward"
if normalized:
norm = "ortho"
return torch.view_as_real(torch.fft.fftn(torch.view_as_complex(input), dim=dims, norm=norm))
def ifft(input, signal_ndim, normalized=False):
# This function is called from the ifft2 function below
if signal_ndim < 1 or signal_ndim > 3:
print("Signal ndim out of range, was", signal_ndim, "but expected a value between 1 and 3, inclusive")
return
dims = (-1)
if signal_ndim == 2:
dims = (-2, -1)
if signal_ndim == 3:
dims = (-3, -2, -1)
norm = "backward"
if normalized:
norm = "ortho"
return torch.view_as_real(torch.fft.ifftn(torch.view_as_complex(input), dim=dims, norm=norm))
def fft2(data):
"""
ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
Apply centered 2 dimensional Fast Fourier Transform. It calls the fft function above to make it compatible with the latest version of pytorch.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The FFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = fft(data, 2, normalized=True)
data = fftshift(data, dim=(-3, -2))
return data
def ifft2(data):
"""
ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
Apply centered 2-dimensional Inverse Fast Fourier Transform. It calls the ifft function above to make it compatible with the latest version of pytorch.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The IFFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = ifft(data, 2, normalized=True)
data = fftshift(data, dim=(-3, -2))
return data
| 11,863 | 32.047354 | 155 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/unet/functions/include/mri_helpers.py | import torch
import torch.nn as nn
import torchvision
import sys
import numpy as np
from PIL import Image
import PIL
import numpy as np
from torch.autograd import Variable
import random
import numpy as np
import torch
import matplotlib.pyplot as plt
from PIL import Image
import PIL
from torch.autograd import Variable
dtype = torch.cuda.FloatTensor
from . import transforms as transform
from .helpers import var_to_np,np_to_var
import numpy
import scipy.signal
import scipy.ndimage
def ksp2measurement(ksp):
return np_to_var( np.transpose( np.array([np.real(ksp),np.imag(ksp)]) , (1, 2, 3, 0)) )
def lsreconstruction(measurement,mode='both'):
# measurement has dimension (1, num_slices, x, y, 2)
fimg = transform.ifft2(measurement)
normimag = torch.norm(fimg[:,:,:,:,0])
normreal = torch.norm(fimg[:,:,:,:,1])
#print("real/img parts: ",normimag, normreal)
if mode == 'both':
return torch.sqrt(fimg[:,:,:,:,0]**2 + fimg[:,:,:,:,1]**2)
elif mode == 'real':
return torch.tensor(fimg[:,:,:,:,0]) #torch.sqrt(fimg[:,:,:,:,0]**2)
elif mode == 'imag':
return torch.sqrt(fimg[:,:,:,:,1]**2)
def root_sum_of_squares2(lsimg):
out = np.zeros(lsimg[0].shape)
for img in lsimg:
out += img**2
return np.sqrt(out)
def crop_center2(img,cropx,cropy):
y,x = img.shape
startx = x//2-(cropx//2)
starty = y//2-(cropy//2)
return img[starty:starty+cropy,startx:startx+cropx]
def channels2imgs(out):
sh = out.shape
chs = int(sh[0]/2)
imgs = np.zeros( (chs,sh[1],sh[2]) )
for i in range(chs):
imgs[i] = np.sqrt( out[2*i]**2 + out[2*i+1]**2 )
return imgs
def forwardm(img,mask):
# img has dimension (2*num_slices, x,y)
# output has dimension (1, num_slices, x, y, 2)
mask = np_to_var(mask)[0].type(dtype)
s = img.shape
ns = int(s[1]/2) # number of slices
fimg = Variable( torch.zeros( (s[0],ns,s[2],s[3],2 ) ) ).type(dtype)
for i in range(ns):
fimg[0,i,:,:,0] = img[0,2*i,:,:]
fimg[0,i,:,:,1] = img[0,2*i+1,:,:]
Fimg = transform.fft2(fimg) # dim: (1,num_slices,x,y,2)
for i in range(ns):
Fimg[0,i,:,:,0] *= mask
Fimg[0,i,:,:,1] *= mask
return Fimg
def get_scale_factor(net,num_channels,in_size,slice_ksp,scale_out=1,scale_type="norm"):
### get norm of deep decoder output
# get net input, scaling of that is irrelevant
shape = [1,num_channels, in_size[0], in_size[1]]
ni = Variable(torch.zeros(shape)).type(dtype)
ni.data.uniform_()
# generate random image
try:
out_chs = net( ni.type(dtype),scale_out=scale_out ).data.cpu().numpy()[0]
except:
out_chs = net( ni.type(dtype) ).data.cpu().numpy()[0]
out_imgs = channels2imgs(out_chs)
out_img_tt = transform.root_sum_of_squares( torch.tensor(out_imgs) , dim=0)
### get norm of least-squares reconstruction
ksp_tt = transform.to_tensor(slice_ksp)
orig_tt = transform.ifft2(ksp_tt) # Apply Inverse Fourier Transform to get the complex image
orig_imgs_tt = transform.complex_abs(orig_tt) # Compute absolute value to get a real image
orig_img_tt = transform.root_sum_of_squares(orig_imgs_tt, dim=0)
orig_img_np = orig_img_tt.cpu().numpy()
if scale_type == "norm":
s = np.linalg.norm(out_img_tt) / np.linalg.norm(orig_img_np)
if scale_type == "mean":
s = (out_img_tt.mean() / orig_img_np.mean()).numpy()[np.newaxis][0]
return s,ni
def data_consistency(parnet, parni, mask1d, slice_ksp_torchtensor1):
img = parnet(parni.type(dtype))
s = img.shape
ns = int(s[1]/2) # number of slices
fimg = Variable( torch.zeros( (s[0],ns,s[2],s[3],2 ) ) ).type(dtype)
for i in range(ns):
fimg[0,i,:,:,0] = img[0,2*i,:,:]
fimg[0,i,:,:,1] = img[0,2*i+1,:,:]
Fimg = transform.fft2(fimg) # dim: (1,num_slices,x,y,2)
# ksp has dim: (num_slices,x,y)
meas = slice_ksp_torchtensor1.unsqueeze(0) # dim: (1,num_slices,x,y,2)
mask = torch.from_numpy(np.array(mask1d, dtype=np.uint8))
ksp_dc = Fimg.clone()
ksp_dc = ksp_dc.detach().cpu()
ksp_dc[:,:,:,mask==1,:] = meas[:,:,:,mask==1,:] # after data consistency block
img_dc = transform.ifft2(ksp_dc)[0]
out = []
for img in img_dc.detach().cpu():
out += [ img[:,:,0].numpy() , img[:,:,1].numpy() ]
par_out_chs = np.array(out)
par_out_imgs = channels2imgs(par_out_chs)
# deep decoder reconstruction
prec = root_sum_of_squares2(par_out_imgs)
if prec.shape[0] > 320:
prec = crop_center2(prec,320,320)
return prec | 4,616 | 32.215827 | 106 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/unet/functions/include/helpers.py | import torch
import torch.nn as nn
import torchvision
import sys
import numpy as np
from PIL import Image
import PIL
import numpy as np
from torch.autograd import Variable
import random
import numpy as np
import torch
import matplotlib.pyplot as plt
from PIL import Image
import PIL
from torch.autograd import Variable
def myimgshow(plt,img):
if(img.shape[0] == 1):
plt.imshow(np.clip(img[0],0,1),cmap='Greys',interpolation='none')
else:
plt.imshow(np.clip(img.transpose(1, 2, 0),0,1),interpolation='none')
def load_and_crop(imgname,target_width=512,target_height=512):
'''
imgname: string of image location
load an image, and center-crop if the image is large enough, else return none
'''
img = Image.open(imgname)
width, height = img.size
if width <= target_width or height <= target_height:
return None
left = (width - target_width)/2
top = (height - target_height)/2
right = (width + target_width)/2
bottom = (height + target_height)/2
return img.crop((left, top, right, bottom))
def save_np_img(img,filename):
if(img.shape[0] == 1):
plt.imshow(np.clip(img[0],0,1),cmap='Greys',interpolation='nearest')
else:
plt.imshow(np.clip(img.transpose(1, 2, 0),0,1))
plt.axis('off')
plt.savefig(filename, bbox_inches='tight')
plt.close()
def np_to_tensor(img_np):
'''Converts image in numpy.array to torch.Tensor.
From C x W x H [0..1] to C x W x H [0..1]
'''
return torch.from_numpy(img_np)
def np_to_var(img_np, dtype = torch.cuda.FloatTensor):
'''Converts image in numpy.array to torch.Variable.
From C x W x H [0..1] to 1 x C x W x H [0..1]
'''
return Variable(np_to_tensor(img_np)[None, :])
def var_to_np(img_var):
'''Converts an image in torch.Variable format to np.array.
From 1 x C x W x H [0..1] to C x W x H [0..1]
'''
return img_var.data.cpu().numpy()[0]
def pil_to_np(img_PIL):
'''Converts image in PIL format to np.array.
From W x H x C [0...255] to C x W x H [0..1]
'''
ar = np.array(img_PIL)
if len(ar.shape) == 3:
ar = ar.transpose(2,0,1)
else:
ar = ar[None, ...]
return ar.astype(np.float32) / 255.
def rgb2ycbcr(img):
#out = color.rgb2ycbcr( img.transpose(1, 2, 0) )
#return out.transpose(2,0,1)/256.
r,g,b = img[0],img[1],img[2]
y = 0.299*r+0.587*g+0.114*b
cb = 0.5 - 0.168736*r - 0.331264*g + 0.5*b
cr = 0.5 + 0.5*r - 0.418588*g - 0.081312*b
return np.array([y,cb,cr])
def ycbcr2rgb(img):
#out = color.ycbcr2rgb( 256.*img.transpose(1, 2, 0) )
#return (out.transpose(2,0,1) - np.min(out))/(np.max(out)-np.min(out))
y,cb,cr = img[0],img[1],img[2]
r = y + 1.402*(cr-0.5)
g = y - 0.344136*(cb-0.5) - 0.714136*(cr-0.5)
b = y + 1.772*(cb - 0.5)
return np.array([r,g,b])
def mse(x_hat,x_true,maxv=1.):
x_hat = x_hat.flatten()
x_true = x_true.flatten()
mse = np.mean(np.square(x_hat-x_true))
energy = np.mean(np.square(x_true))
return mse/energy
def psnr(x_hat,x_true,maxv=1.):
x_hat = x_hat.flatten()
x_true = x_true.flatten()
mse=np.mean(np.square(x_hat-x_true))
psnr_ = 10.*np.log(maxv**2/mse)/np.log(10.)
return psnr_
def num_param(net):
s = sum([np.prod(list(p.size())) for p in net.parameters()]);
return s
#print('Number of params: %d' % s)
def rgb2gray(rgb):
r, g, b = rgb[0,:,:], rgb[1,:,:], rgb[2,:,:]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return np.array([gray])
def savemtx_for_logplot(A,filename = "exp.dat"):
ind = sorted(list(set([int(i) for i in np.geomspace(1, len(A[0])-1 ,num=700)])))
A = [ [a[i] for i in ind] for a in A]
X = np.array([ind] + A)
np.savetxt(filename, X.T, delimiter=' ')
def get_imgnet_imgs(num_samples = 100, path = '../imagenet/',verbose=False):
perm = [i for i in range(1,50000)]
random.Random(4).shuffle(perm)
siz = 512
file = open("exp_imgnet_imgs.txt","w")
imgs = []
sampled = 0
imgslist = []
for imgnr in perm:
# prepare and select image
# Format is: ILSVRC2012_val_00024995.JPEG
imgnr_str = str(imgnr).zfill(8)
imgname = path + 'ILSVRC2012_val_' + imgnr_str + ".JPEG"
img = load_and_crop(imgname,target_width=512,target_height=512)
if img is None: # then the image could not be croped to 512x512
continue
img_np = pil_to_np(img)
if img_np.shape[0] != 3: # we only want to consider color images
continue
if verbose:
imgslist += ['ILSVRC2012_val_' + imgnr_str + ".JPEG"]
print("cp ", imgname, "./imgs")
imgs += [img_np]
sampled += 1
if sampled >= num_samples:
break
if verbose:
print(imgslist)
return imgs
| 4,860 | 26.308989 | 84 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/unet/functions/include/transforms.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch
def to_tensor(data):
"""
Convert numpy array to PyTorch tensor. For complex arrays, the real and imaginary parts
are stacked along the last dimension.
Args:
data (np.array): Input numpy array
Returns:
torch.Tensor: PyTorch version of data
"""
if np.iscomplexobj(data):
data = np.stack((data.real, data.imag), axis=-1)
return torch.from_numpy(data)
def apply_mask(data, mask_func = None, mask = None, seed=None):
"""
Subsample given k-space by multiplying with a mask.
Args:
data (torch.Tensor): The input k-space data. This should have at least 3 dimensions, where
dimensions -3 and -2 are the spatial dimensions, and the final dimension has size
2 (for complex values).
mask_func (callable): A function that takes a shape (tuple of ints) and a random
number seed and returns a mask.
seed (int or 1-d array_like, optional): Seed for the random number generator.
Returns:
(tuple): tuple containing:
masked data (torch.Tensor): Subsampled k-space data
mask (torch.Tensor): The generated mask
"""
shape = np.array(data.shape)
shape[:-3] = 1
if mask is None:
mask = mask_func(shape, seed)
return data * mask, mask
def mask_center(x, mask_from, mask_to):
b, c, h, w, two = x.shape
mask = torch.zeros_like(x)
mask[:, :, :, mask_from:mask_to] = x[:, :, :, mask_from:mask_to]
return mask
def complex_mul(x, y):
assert x.shape[-1] == y.shape[-1] == 2
re = x[..., 0] * y[..., 0] - x[..., 1] * y[..., 1]
im = x[..., 0] * y[..., 1] + x[..., 1] * y[..., 0]
return torch.stack((re, im), dim=-1)
def complex_conj(x):
assert x.shape[-1] == 2
return torch.stack((x[..., 0], -x[..., 1]), dim=-1)
'''
def fft2(data):
"""
Apply centered 2 dimensional Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The FFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = torch.fft(data, 2, normalized=True)
data = fftshift(data, dim=(-3, -2))
return data
def ifft2(data):
"""
Apply centered 2-dimensional Inverse Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The IFFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = torch.ifft(data, 2, normalized=True)
data = fftshift(data, dim=(-3, -2))
return data
'''
def complex_abs(data):
"""
Compute the absolute value of a complex valued input tensor.
Args:
data (torch.Tensor): A complex valued tensor, where the size of the final dimension
should be 2.
Returns:
torch.Tensor: Absolute value of data
"""
assert data.size(-1) == 2
return (data ** 2).sum(dim=-1).sqrt()
def complex_abs_np(data): ############################################### MZD
"""
Compute the absolute value of a complex valued input tensor.
Args:
data (torch.Tensor): A complex valued tensor, where the size of the final dimension
should be 2.
Returns:
torch.Tensor: Absolute value of data
"""
assert data.shape[-1] == 2
return np.sqrt( (data ** 2).sum(axis=-1) )
def complex_abs_sq(data):
"""
Compute the squared absolute value of a complex tensor
"""
assert data.size(-1) == 2
return (data ** 2).sum(dim=-1)
def root_sum_of_squares(data, dim=0):
"""
Compute the Root Sum of Squares (RSS) transform along a given dimension of a tensor.
Args:
data (torch.Tensor): The input tensor
dim (int): The dimensions along which to apply the RSS transform
Returns:
torch.Tensor: The RSS value
"""
return torch.sqrt((data ** 2).sum(dim))
def root_sum_of_squares_complex(data, dim=0):
"""
Compute the Root Sum of Squares (RSS) transform along a given dimension of a tensor.
Args:
data (torch.Tensor): The input tensor
dim (int): The dimensions along which to apply the RSS transform
Returns:
torch.Tensor: The RSS value
"""
return torch.sqrt(complex_abs_sq(data).sum(dim))
def center_crop(data, shape):
"""
Apply a center crop to the input real image or batch of real images.
Args:
data (torch.Tensor): The input tensor to be center cropped. It should have at
least 2 dimensions and the cropping is applied along the last two dimensions.
shape (int, int): The output shape. The shape should be smaller than the
corresponding dimensions of data.
Returns:
torch.Tensor: The center cropped image
"""
assert 0 < shape[0] <= data.shape[-2]
assert 0 < shape[1] <= data.shape[-1]
w_from = (data.shape[-2] - shape[0]) // 2
h_from = (data.shape[-1] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
return data[..., w_from:w_to, h_from:h_to]
def complex_center_crop(data, shape):
"""
Apply a center crop to the input image or batch of complex images.
Args:
data (torch.Tensor): The complex input tensor to be center cropped. It should
have at least 3 dimensions and the cropping is applied along dimensions
-3 and -2 and the last dimensions should have a size of 2.
shape (int, int): The output shape. The shape should be smaller than the
corresponding dimensions of data.
Returns:
torch.Tensor: The center cropped image
"""
assert 0 < shape[0] <= data.shape[-3]
assert 0 < shape[1] <= data.shape[-2]
w_from = (data.shape[-3] - shape[0]) // 2
h_from = (data.shape[-2] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
return data[..., w_from:w_to, h_from:h_to, :]
def center_crop_to_smallest(x, y):
"""
Apply a center crop on the larger image to the size of the smaller image.
"""
smallest_width = min(x.shape[-1], y.shape[-1])
smallest_height = min(x.shape[-2], y.shape[-2])
x = center_crop(x, (smallest_height, smallest_width))
y = center_crop(y, (smallest_height, smallest_width))
return x, y
def normalize(data, mean, stddev, eps=0.):
"""
Normalize the given tensor using:
(data - mean) / (stddev + eps)
Args:
data (torch.Tensor): Input data to be normalized
mean (float): Mean value
stddev (float): Standard deviation
eps (float): Added to stddev to prevent dividing by zero
Returns:
torch.Tensor: Normalized tensor
"""
return (data - mean) / (stddev + eps)
def normalize_instance(data, eps=0.):
"""
Normalize the given tensor using:
(data - mean) / (stddev + eps)
where mean and stddev are computed from the data itself.
Args:
data (torch.Tensor): Input data to be normalized
eps (float): Added to stddev to prevent dividing by zero
Returns:
torch.Tensor: Normalized tensor
"""
mean = data.mean()
std = data.std()
return normalize(data, mean, std, eps), mean, std
# Helper functions
def roll(x, shift, dim):
"""
Similar to np.roll but applies to PyTorch Tensors
"""
if isinstance(shift, (tuple, list)):
assert len(shift) == len(dim)
for s, d in zip(shift, dim):
x = roll(x, s, d)
return x
shift = shift % x.size(dim)
if shift == 0:
return x
left = x.narrow(dim, 0, x.size(dim) - shift)
right = x.narrow(dim, x.size(dim) - shift, shift)
return torch.cat((right, left), dim=dim)
def fftshift(x, dim=None):
"""
Similar to np.fft.fftshift but applies to PyTorch Tensors
"""
if dim is None:
dim = tuple(range(x.dim()))
shift = [dim // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = x.shape[dim] // 2
else:
shift = [x.shape[i] // 2 for i in dim]
return roll(x, shift, dim)
def ifftshift(x, dim=None):
"""
Similar to np.fft.ifftshift but applies to PyTorch Tensors
"""
if dim is None:
dim = tuple(range(x.dim()))
shift = [(dim + 1) // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = (x.shape[dim] + 1) // 2
else:
shift = [(x.shape[i] + 1) // 2 for i in dim]
return roll(x, shift, dim)
def fft(input, signal_ndim, normalized=False):
# This function is called from the fft2 function below
if signal_ndim < 1 or signal_ndim > 3:
print("Signal ndim out of range, was", signal_ndim, "but expected a value between 1 and 3, inclusive")
return
dims = (-1)
if signal_ndim == 2:
dims = (-2, -1)
if signal_ndim == 3:
dims = (-3, -2, -1)
norm = "backward"
if normalized:
norm = "ortho"
return torch.view_as_real(torch.fft.fftn(torch.view_as_complex(input), dim=dims, norm=norm))
def ifft(input, signal_ndim, normalized=False):
# This function is called from the ifft2 function below
if signal_ndim < 1 or signal_ndim > 3:
print("Signal ndim out of range, was", signal_ndim, "but expected a value between 1 and 3, inclusive")
return
dims = (-1)
if signal_ndim == 2:
dims = (-2, -1)
if signal_ndim == 3:
dims = (-3, -2, -1)
norm = "backward"
if normalized:
norm = "ortho"
return torch.view_as_real(torch.fft.ifftn(torch.view_as_complex(input), dim=dims, norm=norm))
def fft2(data):
"""
ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
Apply centered 2 dimensional Fast Fourier Transform. It calls the fft function above to make it compatible with the latest version of pytorch.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The FFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = fft(data, 2, normalized=True)
data = fftshift(data, dim=(-3, -2))
return data
def ifft2(data):
"""
ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
Apply centered 2-dimensional Inverse Fast Fourier Transform. It calls the ifft function above to make it compatible with the latest version of pytorch.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The IFFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = ifft(data, 2, normalized=True)
data = fftshift(data, dim=(-3, -2))
return data
| 11,673 | 31.70028 | 155 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/unet/functions/include/pytorch_ssim/__init__.py | import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
return gauss/gauss.sum()
def create_window(window_size, channel):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
return window
def _ssim(img1, img2, window, window_size, channel, size_average = True):
mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1*mu2
sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
class SSIM(torch.nn.Module):
def __init__(self, window_size = 11, size_average = True):
super(SSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.channel = 1
self.window = create_window(window_size, self.channel)
def forward(self, img1, img2):
(_, channel, _, _) = img1.size()
if channel == self.channel and self.window.data.type() == img1.data.type():
window = self.window
else:
window = create_window(self.window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
self.window = window
self.channel = channel
return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
"""def ssim(img1, img2, window_size = 11, size_average = True):
(_, channel, _, _) = img1.size()
window = create_window(window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
return _ssim(img1, img2, window, window_size, channel, size_average)"""
| 2,641 | 34.702703 | 104 | py |
DCEC | DCEC-master/ConvAE.py | from keras.layers import Conv2D, Conv2DTranspose, Dense, Flatten, Reshape
from keras.models import Sequential, Model
from keras.utils.vis_utils import plot_model
import numpy as np
def CAE(input_shape=(28, 28, 1), filters=[32, 64, 128, 10]):
model = Sequential()
if input_shape[0] % 8 == 0:
pad3 = 'same'
else:
pad3 = 'valid'
model.add(Conv2D(filters[0], 5, strides=2, padding='same', activation='relu', name='conv1', input_shape=input_shape))
model.add(Conv2D(filters[1], 5, strides=2, padding='same', activation='relu', name='conv2'))
model.add(Conv2D(filters[2], 3, strides=2, padding=pad3, activation='relu', name='conv3'))
model.add(Flatten())
model.add(Dense(units=filters[3], name='embedding'))
model.add(Dense(units=filters[2]*int(input_shape[0]/8)*int(input_shape[0]/8), activation='relu'))
model.add(Reshape((int(input_shape[0]/8), int(input_shape[0]/8), filters[2])))
model.add(Conv2DTranspose(filters[1], 3, strides=2, padding=pad3, activation='relu', name='deconv3'))
model.add(Conv2DTranspose(filters[0], 5, strides=2, padding='same', activation='relu', name='deconv2'))
model.add(Conv2DTranspose(input_shape[2], 5, strides=2, padding='same', name='deconv1'))
model.summary()
return model
if __name__ == "__main__":
from time import time
# setting the hyper parameters
import argparse
parser = argparse.ArgumentParser(description='train')
parser.add_argument('--dataset', default='usps', choices=['mnist', 'usps'])
parser.add_argument('--n_clusters', default=10, type=int)
parser.add_argument('--batch_size', default=256, type=int)
parser.add_argument('--epochs', default=200, type=int)
parser.add_argument('--save_dir', default='results/temp', type=str)
args = parser.parse_args()
print(args)
import os
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
# load dataset
from datasets import load_mnist, load_usps
if args.dataset == 'mnist':
x, y = load_mnist()
elif args.dataset == 'usps':
x, y = load_usps('data/usps')
# define the model
model = CAE(input_shape=x.shape[1:], filters=[32, 64, 128, 10])
plot_model(model, to_file=args.save_dir + '/%s-pretrain-model.png' % args.dataset, show_shapes=True)
model.summary()
# compile the model and callbacks
optimizer = 'adam'
model.compile(optimizer=optimizer, loss='mse')
from keras.callbacks import CSVLogger
csv_logger = CSVLogger(args.save_dir + '/%s-pretrain-log.csv' % args.dataset)
# begin training
t0 = time()
model.fit(x, x, batch_size=args.batch_size, epochs=args.epochs, callbacks=[csv_logger])
print('Training time: ', time() - t0)
model.save(args.save_dir + '/%s-pretrain-model-%d.h5' % (args.dataset, args.epochs))
# extract features
feature_model = Model(inputs=model.input, outputs=model.get_layer(name='embedding').output)
features = feature_model.predict(x)
print('feature shape=', features.shape)
# use features for clustering
from sklearn.cluster import KMeans
km = KMeans(n_clusters=args.n_clusters)
features = np.reshape(features, newshape=(features.shape[0], -1))
pred = km.fit_predict(features)
from . import metrics
print('acc=', metrics.acc(y, pred), 'nmi=', metrics.nmi(y, pred), 'ari=', metrics.ari(y, pred))
| 3,398 | 38.068966 | 121 | py |
DCEC | DCEC-master/datasets.py | import numpy as np
def load_mnist():
# the data, shuffled and split between train and test sets
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x = np.concatenate((x_train, x_test))
y = np.concatenate((y_train, y_test))
x = x.reshape(-1, 28, 28, 1).astype('float32')
x = x/255.
print('MNIST:', x.shape)
return x, y
def load_usps(data_path='./data/usps'):
import os
if not os.path.exists(data_path+'/usps_train.jf'):
if not os.path.exists(data_path+'/usps_train.jf.gz'):
os.system('wget http://www-i6.informatik.rwth-aachen.de/~keysers/usps_train.jf.gz -P %s' % data_path)
os.system('wget http://www-i6.informatik.rwth-aachen.de/~keysers/usps_test.jf.gz -P %s' % data_path)
os.system('gunzip %s/usps_train.jf.gz' % data_path)
os.system('gunzip %s/usps_test.jf.gz' % data_path)
with open(data_path + '/usps_train.jf') as f:
data = f.readlines()
data = data[1:-1]
data = [list(map(float, line.split())) for line in data]
data = np.array(data)
data_train, labels_train = data[:, 1:], data[:, 0]
with open(data_path + '/usps_test.jf') as f:
data = f.readlines()
data = data[1:-1]
data = [list(map(float, line.split())) for line in data]
data = np.array(data)
data_test, labels_test = data[:, 1:], data[:, 0]
x = np.concatenate((data_train, data_test)).astype('float32')
x /= 2.0
x = x.reshape([-1, 16, 16, 1])
y = np.concatenate((labels_train, labels_test))
print('USPS samples', x.shape)
return x, y
| 1,619 | 33.468085 | 113 | py |
DCEC | DCEC-master/DCEC.py | from time import time
import numpy as np
import keras.backend as K
from keras.engine.topology import Layer, InputSpec
from keras.models import Model
from keras.utils.vis_utils import plot_model
from sklearn.cluster import KMeans
import metrics
from ConvAE import CAE
class ClusteringLayer(Layer):
"""
Clustering layer converts input sample (feature) to soft label, i.e. a vector that represents the probability of the
sample belonging to each cluster. The probability is calculated with student's t-distribution.
# Example
```
model.add(ClusteringLayer(n_clusters=10))
```
# Arguments
n_clusters: number of clusters.
weights: list of Numpy array with shape `(n_clusters, n_features)` witch represents the initial cluster centers.
alpha: parameter in Student's t-distribution. Default to 1.0.
# Input shape
2D tensor with shape: `(n_samples, n_features)`.
# Output shape
2D tensor with shape: `(n_samples, n_clusters)`.
"""
def __init__(self, n_clusters, weights=None, alpha=1.0, **kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(ClusteringLayer, self).__init__(**kwargs)
self.n_clusters = n_clusters
self.alpha = alpha
self.initial_weights = weights
self.input_spec = InputSpec(ndim=2)
def build(self, input_shape):
assert len(input_shape) == 2
input_dim = input_shape[1]
self.input_spec = InputSpec(dtype=K.floatx(), shape=(None, input_dim))
self.clusters = self.add_weight((self.n_clusters, input_dim), initializer='glorot_uniform', name='clusters')
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
self.built = True
def call(self, inputs, **kwargs):
""" student t-distribution, as same as used in t-SNE algorithm.
q_ij = 1/(1+dist(x_i, u_j)^2), then normalize it.
Arguments:
inputs: the variable containing data, shape=(n_samples, n_features)
Return:
q: student's t-distribution, or soft labels for each sample. shape=(n_samples, n_clusters)
"""
q = 1.0 / (1.0 + (K.sum(K.square(K.expand_dims(inputs, axis=1) - self.clusters), axis=2) / self.alpha))
q **= (self.alpha + 1.0) / 2.0
q = K.transpose(K.transpose(q) / K.sum(q, axis=1))
return q
def compute_output_shape(self, input_shape):
assert input_shape and len(input_shape) == 2
return input_shape[0], self.n_clusters
def get_config(self):
config = {'n_clusters': self.n_clusters}
base_config = super(ClusteringLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class DCEC(object):
def __init__(self,
input_shape,
filters=[32, 64, 128, 10],
n_clusters=10,
alpha=1.0):
super(DCEC, self).__init__()
self.n_clusters = n_clusters
self.input_shape = input_shape
self.alpha = alpha
self.pretrained = False
self.y_pred = []
self.cae = CAE(input_shape, filters)
hidden = self.cae.get_layer(name='embedding').output
self.encoder = Model(inputs=self.cae.input, outputs=hidden)
# Define DCEC model
clustering_layer = ClusteringLayer(self.n_clusters, name='clustering')(hidden)
self.model = Model(inputs=self.cae.input,
outputs=[clustering_layer, self.cae.output])
def pretrain(self, x, batch_size=256, epochs=200, optimizer='adam', save_dir='results/temp'):
print('...Pretraining...')
self.cae.compile(optimizer=optimizer, loss='mse')
from keras.callbacks import CSVLogger
csv_logger = CSVLogger(args.save_dir + '/pretrain_log.csv')
# begin training
t0 = time()
self.cae.fit(x, x, batch_size=batch_size, epochs=epochs, callbacks=[csv_logger])
print('Pretraining time: ', time() - t0)
self.cae.save(save_dir + '/pretrain_cae_model.h5')
print('Pretrained weights are saved to %s/pretrain_cae_model.h5' % save_dir)
self.pretrained = True
def load_weights(self, weights_path):
self.model.load_weights(weights_path)
def extract_feature(self, x): # extract features from before clustering layer
return self.encoder.predict(x)
def predict(self, x):
q, _ = self.model.predict(x, verbose=0)
return q.argmax(1)
@staticmethod
def target_distribution(q):
weight = q ** 2 / q.sum(0)
return (weight.T / weight.sum(1)).T
def compile(self, loss=['kld', 'mse'], loss_weights=[1, 1], optimizer='adam'):
self.model.compile(loss=loss, loss_weights=loss_weights, optimizer=optimizer)
def fit(self, x, y=None, batch_size=256, maxiter=2e4, tol=1e-3,
update_interval=140, cae_weights=None, save_dir='./results/temp'):
print('Update interval', update_interval)
save_interval = x.shape[0] / batch_size * 5
print('Save interval', save_interval)
# Step 1: pretrain if necessary
t0 = time()
if not self.pretrained and cae_weights is None:
print('...pretraining CAE using default hyper-parameters:')
print(' optimizer=\'adam\'; epochs=200')
self.pretrain(x, batch_size, save_dir=save_dir)
self.pretrained = True
elif cae_weights is not None:
self.cae.load_weights(cae_weights)
print('cae_weights is loaded successfully.')
# Step 2: initialize cluster centers using k-means
t1 = time()
print('Initializing cluster centers with k-means.')
kmeans = KMeans(n_clusters=self.n_clusters, n_init=20)
self.y_pred = kmeans.fit_predict(self.encoder.predict(x))
y_pred_last = np.copy(self.y_pred)
self.model.get_layer(name='clustering').set_weights([kmeans.cluster_centers_])
# Step 3: deep clustering
# logging file
import csv, os
if not os.path.exists(save_dir):
os.makedirs(save_dir)
logfile = open(save_dir + '/dcec_log.csv', 'w')
logwriter = csv.DictWriter(logfile, fieldnames=['iter', 'acc', 'nmi', 'ari', 'L', 'Lc', 'Lr'])
logwriter.writeheader()
t2 = time()
loss = [0, 0, 0]
index = 0
for ite in range(int(maxiter)):
if ite % update_interval == 0:
q, _ = self.model.predict(x, verbose=0)
p = self.target_distribution(q) # update the auxiliary target distribution p
# evaluate the clustering performance
self.y_pred = q.argmax(1)
if y is not None:
acc = np.round(metrics.acc(y, self.y_pred), 5)
nmi = np.round(metrics.nmi(y, self.y_pred), 5)
ari = np.round(metrics.ari(y, self.y_pred), 5)
loss = np.round(loss, 5)
logdict = dict(iter=ite, acc=acc, nmi=nmi, ari=ari, L=loss[0], Lc=loss[1], Lr=loss[2])
logwriter.writerow(logdict)
print('Iter', ite, ': Acc', acc, ', nmi', nmi, ', ari', ari, '; loss=', loss)
# check stop criterion
delta_label = np.sum(self.y_pred != y_pred_last).astype(np.float32) / self.y_pred.shape[0]
y_pred_last = np.copy(self.y_pred)
if ite > 0 and delta_label < tol:
print('delta_label ', delta_label, '< tol ', tol)
print('Reached tolerance threshold. Stopping training.')
logfile.close()
break
# train on batch
if (index + 1) * batch_size > x.shape[0]:
loss = self.model.train_on_batch(x=x[index * batch_size::],
y=[p[index * batch_size::], x[index * batch_size::]])
index = 0
else:
loss = self.model.train_on_batch(x=x[index * batch_size:(index + 1) * batch_size],
y=[p[index * batch_size:(index + 1) * batch_size],
x[index * batch_size:(index + 1) * batch_size]])
index += 1
# save intermediate model
if ite % save_interval == 0:
# save DCEC model checkpoints
print('saving model to:', save_dir + '/dcec_model_' + str(ite) + '.h5')
self.model.save_weights(save_dir + '/dcec_model_' + str(ite) + '.h5')
ite += 1
# save the trained model
logfile.close()
print('saving model to:', save_dir + '/dcec_model_final.h5')
self.model.save_weights(save_dir + '/dcec_model_final.h5')
t3 = time()
print('Pretrain time: ', t1 - t0)
print('Clustering time:', t3 - t1)
print('Total time: ', t3 - t0)
if __name__ == "__main__":
# setting the hyper parameters
import argparse
parser = argparse.ArgumentParser(description='train')
parser.add_argument('dataset', default='mnist', choices=['mnist', 'usps', 'mnist-test'])
parser.add_argument('--n_clusters', default=10, type=int)
parser.add_argument('--batch_size', default=256, type=int)
parser.add_argument('--maxiter', default=2e4, type=int)
parser.add_argument('--gamma', default=0.1, type=float,
help='coefficient of clustering loss')
parser.add_argument('--update_interval', default=140, type=int)
parser.add_argument('--tol', default=0.001, type=float)
parser.add_argument('--cae_weights', default=None, help='This argument must be given')
parser.add_argument('--save_dir', default='results/temp')
args = parser.parse_args()
print(args)
import os
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
# load dataset
from datasets import load_mnist, load_usps
if args.dataset == 'mnist':
x, y = load_mnist()
elif args.dataset == 'usps':
x, y = load_usps('data/usps')
elif args.dataset == 'mnist-test':
x, y = load_mnist()
x, y = x[60000:], y[60000:]
# prepare the DCEC model
dcec = DCEC(input_shape=x.shape[1:], filters=[32, 64, 128, 10], n_clusters=args.n_clusters)
plot_model(dcec.model, to_file=args.save_dir + '/dcec_model.png', show_shapes=True)
dcec.model.summary()
# begin clustering.
optimizer = 'adam'
dcec.compile(loss=['kld', 'mse'], loss_weights=[args.gamma, 1], optimizer=optimizer)
dcec.fit(x, y=y, tol=args.tol, maxiter=args.maxiter,
update_interval=args.update_interval,
save_dir=args.save_dir,
cae_weights=args.cae_weights)
y_pred = dcec.y_pred
print('acc = %.4f, nmi = %.4f, ari = %.4f' % (metrics.acc(y, y_pred), metrics.nmi(y, y_pred), metrics.ari(y, y_pred)))
| 11,131 | 40.849624 | 122 | py |
evaluation-autoguide | evaluation-autoguide-main/utils.py | import os
import numpy, numpyro, pyro
import pathlib
from typing import Any, Dict, IO
from dataclasses import dataclass, field
from pandas import DataFrame, Series
from posteriordb import PosteriorDatabase
from os.path import splitext, basename
from itertools import product
from cmdstanpy import CmdStanModel
from stannumpyro.dppl import NumPyroModel, compile as compile_numpyro
from stanpyro.dppl import PyroModel, compile as compile_pyro
import jax.random
def _valid_ref(pdb, name):
"""
Test if reference exists in PosteriorDB
"""
try:
posterior = pdb.posterior(name)
posterior.reference_draws_info()
return True
except Exception:
return False
pdb_root = "./posteriordb"
pdb_path = os.path.join(pdb_root, "posterior_database")
my_pdb = PosteriorDatabase(pdb_path)
golds = [x for x in my_pdb.posterior_names() if _valid_ref(my_pdb, x)]
def get_posterior(name):
return my_pdb.posterior(name)
def summary(samples):
if isinstance(samples, list):
# Multiple chains
assert len(samples) > 0
res = samples[0]
for c in samples[1:]:
res = {k: v + c[k] for k, v in res.items()}
else:
# Only one chain
assert isinstance(samples, dict)
res = samples
res = {k: numpy.array(v) for k, v in res.items()}
summary_dict = numpyro.diagnostics.summary(res, group_by_chain=False)
columns = list(summary_dict.values())[0].keys()
index = []
rows = []
for name, stats_dict in summary_dict.items():
shape = stats_dict["mean"].shape
if len(shape) == 0:
index.append(name)
rows.append(stats_dict.values())
else:
for idx in product(*map(range, shape)):
idx_str = "[{}]".format(",".join(map(str, idx)))
index.append(name + idx_str)
rows.append([v[idx] for v in stats_dict.values()])
return DataFrame(rows, columns=columns, index=index)
def gold_summary(posterior):
samples = posterior.reference_draws()
return summary(samples)
def compile_pyro_model(posterior, backend, mode):
model = posterior.model
stanfile = model.code_file_path("stan")
build_dir = f"_build_{backend}_{mode}"
if not os.path.exists(build_dir):
os.makedirs(build_dir)
pathlib.Path(f"{build_dir}/__init__.py").touch()
if backend == "numpyro":
compile_numpyro(mode, stanfile, build_dir=build_dir)
else:
compile_pyro(mode, stanfile, build_dir=build_dir)
def compile_stan_model(posterior):
stanfile = posterior.model.code_file_path(framework="stan")
_ = CmdStanModel(stan_file=stanfile)
def compile_model(*, posterior, backend, mode):
if backend == "stan":
compile_stan_model(posterior)
else:
compile_pyro_model(posterior, backend, mode)
| 2,859 | 27.888889 | 73 | py |
evaluation-autoguide | evaluation-autoguide-main/eval.py | import logging, datetime, os, sys, traceback, re, argparse
import numpyro
import jax
from stannumpyro.dppl import NumPyroModel
from numpyro.infer import Trace_ELBO
from numpyro.optim import Adam
import numpyro.infer.autoguide as autoguide
from utils import (
compile_model,
get_posterior,
summary,
golds,
)
from cmdstanpy import CmdStanModel
import pandas as pd
import numpy
logger = logging.getLogger(__name__)
def run_advi(*, posterior, mode, num_steps, num_samples):
model = posterior.model
data = posterior.data.values()
stanfile = model.code_file_path("stan")
sm = CmdStanModel(stan_file=stanfile)
fit = sm.variational(
iter=num_steps, algorithm=mode, output_samples=num_samples, data=data
)
return fit
def run_svi(*, posterior, backend, mode, Autoguide, num_steps, num_samples):
"""
Compile and run the model.
Returns the summary Dataframe
"""
model = posterior.model
data = posterior.data.values()
stanfile = model.code_file_path("stan")
build_dir = f"_build_{backend}_{mode}"
numpyro_model = NumPyroModel(stanfile, recompile=False, build_dir=build_dir)
optim = Adam(step_size=0.0005)
loss = Trace_ELBO()
guide = Autoguide(numpyro_model.get_model())
svi = numpyro_model.svi(optim, loss, guide)
svi.run(jax.random.PRNGKey(0), data, num_steps=num_steps, num_samples=num_samples)
return svi
def compare(*, posterior, backend, mode, Autoguide, num_steps, num_samples, logfile):
"""
Compare gold standard with model.
"""
logger.info(f"Processing {posterior.name}")
sg = summary(posterior.reference_draws())
if backend == "stan":
fit = run_advi(
posterior=posterior, mode=mode, num_steps=num_steps, num_samples=num_samples
)
samples = {
k: numpy.array(fit.variational_sample[i])
for i, k in enumerate(fit.column_names)
}
sm = summary(samples)
sm = sm[~sm.index.str.endswith("__")]
sm = sm.rename(columns={"Mean": "mean", "StdDev": "std", "N_Eff": "n_eff"})
else:
svi = run_svi(
posterior=posterior,
backend=backend,
mode=mode,
Autoguide=Autoguide,
num_steps=num_steps,
num_samples=num_samples,
)
sm = svi.summary()
if not set(sg.index).issubset(set(sm.index)):
raise RuntimeError("Missing parameter")
# perf_cmdstan condition: err > 0.0001 and (err / stdev) > 0.3
sm = sm.loc[sg.index]
sm = sm[["mean", "std", "n_eff"]]
sm["err"] = abs(sm["mean"] - sg["mean"])
sm["rel_err"] = sm["err"] / sg["std"]
comp = sm[(sm["err"] > 0.0001) & (sm["rel_err"] > 0.3)].dropna()
if not comp.empty:
logger.error(f"Failed {posterior.name}")
print(f"{name},mismatch,{sm['rel_err'].max(skipna=False)},{sm['n_eff'].mean(skipna=False)}", file=logfile, flush=True)
else:
logger.info(f"Success {posterior.name}")
print(f"{name},success,{sm['rel_err'].max(skipna=False)},{sm['n_eff'].mean(skipna=False)}", file=logfile, flush=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Run autoguide accuracy experiment on PosteriorDB models."
)
parser.add_argument(
"--backend",
help="inference backend (numpyro, or stan)",
required=True,
)
parser.add_argument(
"--mode",
help="compilation mode for NumPyro (generative, comprehensive, mixed), algo for Stan (fullrank or meanfield)",
required=True,
)
parser.add_argument(
"--test",
help="run test experiment (steps = 100, samples = 100)",
action="store_true",
)
parser.add_argument(
"--posteriors", nargs="+", help="select the examples to execute"
)
parser.add_argument(
"--guide",
help="autoguide (http://num.pyro.ai/en/latest/autoguide.html)",
default="AutoNormal",
)
# Override posteriorDB configs
parser.add_argument("--steps", type=int, help="number of svi steps")
parser.add_argument("--samples", type=int, help="number of samples")
args = parser.parse_args()
if args.posteriors:
assert all(p in golds for p in args.posteriors), "Bad posterior name"
golds = args.posteriors
logging.basicConfig(level=logging.INFO)
numpyro.set_host_device_count(20)
if not os.path.exists("logs"):
os.makedirs("logs")
today = datetime.datetime.now()
logpath = f"logs/status_svi_{args.backend}_{args.mode}"
if args.backend != "stan":
logpath += f"_{args.guide}"
logpath += f"_{today.strftime('%y%m%d_%H%M%S')}.csv"
with open(logpath, "a") as logfile:
print(",status,rel_err,n_eff,exception", file=logfile, flush=True)
for name in (n for n in golds):
# Configurations
posterior = get_posterior(name)
if args.test:
args.steps = 100
args.samples = 100
if args.steps is None:
args.steps = 100000
if args.samples is None:
args.samples = posterior.reference_draws_info()["diagnostics"]["ndraws"]
try:
# Compile
compile_model(posterior=posterior, backend=args.backend, mode=args.mode)
# Run and Compare
compare(
posterior=posterior,
backend=args.backend,
mode=args.mode,
Autoguide=getattr(autoguide, args.guide),
num_steps=args.steps,
num_samples=args.samples,
logfile=logfile,
)
except:
exc_type, exc_value, _ = sys.exc_info()
err = " ".join(traceback.format_exception_only(exc_type, exc_value))
err = re.sub(r"[\n\r\",]", " ", err)[:150] + "..."
logger.error(f"Failed {name} with {err}")
print(f'{name},error,,,"{err}"', file=logfile, flush=True)
| 6,107 | 33.314607 | 126 | py |
csshar_tfa | csshar_tfa-main/ssl_training.py | import argparse
from models.dtw import DTWModule
import os
from pytorch_lightning import Trainer, seed_everything
from models.simclr import SimCLR
from models.mlp import LinearClassifier, MLPDropout, ProjectionMLP, MLP
from models.supervised import SupervisedModel
from utils.experiment_utils import generate_experiment_id, load_yaml_to_dict, dict_to_json
from utils.training_utils import (init_datamodule, init_encoder,
init_transforms, nested_to_flat_dict,
setup_callbacks, setup_callbacks_ssl,
setup_loggers, flat_to_nested_dict, init_ssl_pretrained)
def parse_arguments():
parser = argparse.ArgumentParser()
parser = Trainer.add_argparse_args(parser)
# configs paths
parser.add_argument('--experiment_config_path', required=True, help='Path to experiment yaml file')
parser.add_argument('--dataset_config_path', default='configs/dataset_configs.yaml', help='Path to datasets yaml file')
parser.add_argument('--augmentations_path', help='Path to augmentations yaml file')
# data and models
parser.add_argument('--dataset', required=True, choices=['uci_har', 'mobi_act', 'usc_had'], help='Dataset name')
parser.add_argument('--framework', default='simclr', choices=['simclr', 'dtw'], help='SSL framework')
parser.add_argument('--model', required=True, choices=['cnn1d', 'transformer'], help='Encoder model')
parser.add_argument('--model_save_path', default='./model_weights', help='Folder for the model weights')
# used to run only in fine tuning mode
parser.add_argument('--linear_eval', action='store_true', help='Flag for using linear evaluation protocol')
parser.add_argument('--supervised', action='store_true', help='Flag for supervised experiments')
parser.add_argument('--fine_tuning', action='store_true', help='Flag for fine-tuning only mode (pre-training is skipped)')
parser.add_argument('--fine_tuning_ckpt_path', help='Path to a pretrained encoder. Required if running with --fine_tuning.')
# other training configs
parser.add_argument('--no_ram', action='store_true', default=False, help='If true, dataset is not first read into RAM')
parser.add_argument('--no_ckpt', action='store_true', default=False, help='Flag for running experiments without saving model weights')
parser.add_argument('--num-workers', default=1, type=int, help='Num workers in dataloaders')
parser.add_argument('--sweep', action='store_true', default=False, help='Set automatically if running in WandB sweep mode. You do not need to set this manually.')
# cross-subject cross-validation
parser.add_argument('--cross_subject_cv', action='store_true', default=False, help='Flag for using cross-subject cross-validation')
parser.add_argument('--num_folds', default=5, help='Number of folds in cross-subject cv')
parser.add_argument('--fine_tuning_ckpt_paths', nargs='+', help='Path to pre-trained encoders if only fine-tuning is needed for cross-subject cv')
# semi-supervised learning
parser.add_argument('--semi_sup', action='store_true', default=False, help='Flag for running semi-supervised learning experiments. Can be combined with --supervised')
parser.add_argument('--semi_sup_runs', default=10, help='Number of SSL runs')
parser.add_argument('--semi_sup_results_path', default='./results/semi_sup', help='Semi-sup results path')
return parser.parse_args()
def ssl_pre_training(args, cfg, dataset_cfg, experiment_id, loggers_list, loggers_dict):
""" Runs SSL pre-training
Parameters
----------
args : argparse.Namespace
arguments parsed from argparse
cfg : dict
experiment configs parsed from the input yaml
dataset_cfg : dict
dataset configs parsed from the input yaml
experiment_id : string
unique experiment name
loggers_list : list
list of loggers
loggers_dict : dict
dictionary with loggers
Returns
-------
Pre-trained encoder model and altered cfg
"""
# seed for pre-training for reproducability
seed_everything(cfg['experiment']['seed'])
# initialize transforms: modailty transforms + random transformations for view generation
num_epochs = cfg['experiment']['num_epochs_ssl']
augmentations_dict = load_yaml_to_dict(args.augmentations_path)
flat_augmentations_dict = nested_to_flat_dict({"augmentations": augmentations_dict})
# config overwriting for sweeps
if args.sweep:
_wandb = loggers_dict['wandb'].experiment
# Take some specific parameters.
num_epochs = _wandb.config["num_epochs_ssl"]
# Take SSL model kwargs and merge with experiment config.
ssl_key_values = {key: _wandb.config[key] for key in _wandb.config.keys() if key.startswith('ssl.')}
ssl_kwargs_dict = flat_to_nested_dict(ssl_key_values)
if ssl_kwargs_dict != {}:
cfg['model']['ssl']['kwargs'] = {**cfg['model']['ssl']['kwargs'], **ssl_kwargs_dict['ssl']}
# Take encoder kwargs and merge with experiment config.
encoder_key_values = {key: _wandb.config[key] for key in _wandb.config.keys() if key.startswith('model.')}
encoder_kwargs_dict = flat_to_nested_dict(encoder_key_values)
if encoder_kwargs_dict != {}:
cfg['model'][args.model]['kwargs'] = {**cfg['model'][args.model]['kwargs'], **encoder_kwargs_dict['model']}
# Take augmentation config from sweep and merge with default config.
augmentation_key_values = {key: _wandb.config[key] for key in _wandb.config.keys() if key.startswith('augmentations.')}
flat_augmentations_dict = {**flat_augmentations_dict, **augmentation_key_values}
augmentations_dict = flat_to_nested_dict(flat_augmentations_dict)['augmentations']
# init transforms
train_transforms, test_transforms = init_transforms(augmentations_dict)
# init datamodule with ssl flag
datamodule = init_datamodule(dataset_cfg[args.dataset]['train'], dataset_cfg[args.dataset]['val'], dataset_cfg[args.dataset]['test'],
batch_size=cfg['model']['ssl']['kwargs']['ssl_batch_size'], train_transforms=train_transforms, test_transforms=test_transforms, ssl=True, n_views=2, num_workers=args.num_workers, store_in_ram = not args.no_ram)
# initialize encoder, projection and ssl framework model
encoder = init_encoder(cfg['model'][args.model])
projection = ProjectionMLP(encoder.out_size, cfg['model']['ssl']['kwargs']['projection_hidden'], cfg['model']['ssl']['kwargs']['embedding_size'])
if args.framework == 'simclr':
model = SimCLR(encoder, projection, **cfg['model']['ssl']['kwargs'])
elif args.framework == 'dtw':
model = DTWModule(encoder, projection, **cfg['model']['ssl']['kwargs'])
# init callbacks
callbacks = setup_callbacks_ssl(
no_ckpt = args.no_ckpt,
model_weights_path = args.model_save_path,
dataset = args.dataset,
model = args.model,
experiment_id = experiment_id,
online_eval = False,
online_eval_args = None
)
# initialize trainer and fit the ssl model
trainer = Trainer.from_argparse_args(args=args, logger=loggers_list, gpus=1, deterministic=True, max_epochs=num_epochs, default_root_dir='logs',
callbacks=callbacks, checkpoint_callback=not args.no_ckpt)
# train the model
trainer.fit(model, datamodule)
return model.encoder, cfg
def fine_tuning(args, cfg, dataset_cfg, encoder, loggers_list, loggers_dict, experiment_id, limited_k=None, ft=True):
""" Fine-tunes and tests an output model and freezes the provided encoder. If supervised argument is True, encoder is fine-tuned as well.
Parameters
----------
args : argparse.Namespace
arguments parsed from argparse
cfg : dict
experiment configs parsed from the input yaml
dataset_cfg : dict
dataset configs parsed from the input yaml
encoder : pytorch_lightning.core.lightning.LightningModule OR torch.nn.Module
pytorch encoder
loggers_list : list
list of loggers
loggers_dict : dict
dictionary with loggers
experiment_id : string
unique experiment name
limited_k : int
Only for semi-sup: number of training data instances per class available for training
ft : bool
fine-tuning flag: if ft is True -> encoder is frozen, if ft is False -> encoder is tuned as well
Returns
-------
Dictionary with metrics and their values
"""
if not args.semi_sup:
seed_everything(cfg['experiment']['seed']) # reset seed for consistency in results
batch_size = cfg['experiment']['batch_size_fine_tuning']
num_epochs = cfg['experiment']['num_epochs_fine_tuning']
# if using wandb and performing a sweep, overwrite some config params with the sweep params.
if args.sweep:
_wandb = loggers_dict['wandb'].experiment
batch_size = _wandb.config["batch_size_fine_tuning"]
num_epochs = _wandb.config["num_epochs_fine_tuning"]
# initialize classifier and the whole model
if args.linear_eval:
classifier = LinearClassifier(encoder.out_size, dataset_cfg[args.dataset]['n_classes'])
else:
classifier = MLPDropout(encoder.out_size, dataset_cfg[args.dataset]['n_classes'])
model = SupervisedModel(encoder, classifier, fine_tuning=ft, metric_scheduler=dataset_cfg[args.dataset]['main_metric'], lr=cfg['model'][args.model]['kwargs']['lr'])
# setup callbacks
callbacks = setup_callbacks(
early_stopping_metric = "val_f1-score",
early_stopping_mode = "max",
class_names = dataset_cfg[args.dataset]["class_names"],
num_classes = len(dataset_cfg[args.dataset]["class_names"]),
no_ckpt = args.no_ckpt,
model_weights_path = args.model_save_path,
metric = 'val_' + dataset_cfg[args.dataset]['main_metric'],
dataset = args.dataset,
model = 'ssl_finetuned_' + args.framework + '_' + args.model,
experiment_id = experiment_id
)
# init datamodule
datamodule = init_datamodule(dataset_cfg[args.dataset]['train'], dataset_cfg[args.dataset]['val'], dataset_cfg[args.dataset]['test'],
batch_size=batch_size, num_workers=args.num_workers, limited_k=limited_k, store_in_ram = not args.no_ram)
# init trainer, run training (fine-tuning) and test
trainer = Trainer.from_argparse_args(args=args, logger=loggers_list, gpus=1, deterministic=True, max_epochs=num_epochs, default_root_dir='logs',
callbacks=callbacks, checkpoint_callback=not args.no_ckpt)
trainer.fit(model, datamodule)
trainer.test(model, datamodule, ckpt_path='best')
# compile metrics
metrics = {metric: float(val) for metric, val in trainer.callback_metrics.items()}
# close wandb experiment logging
if 'wandb' in loggers_dict:
loggers_dict['wandb'].experiment.finish()
return metrics
def init_loggers(args, cfg, experiment_id, fine_tune_only=False, approach='simclr'):
""" Initialize the loggers based on the experiment configs. Typically creates wandb and tensorboard loggers.
Parameters
----------
args : argparse.Namespace
arguments parsed from argparse
cfg : dict
experiment configs parsed from the input yaml
experiment_id : string
unique experiment name
fine_tuning_only : bool
flag for fine-tuning without pre-training
approach : string
framework
Returns
-------
Dictionary with metrics and their values
"""
experiment_info = { # default values; may be overrided by sweep config
"dataset": args.dataset,
"model": cfg['model'][args.model]['encoder_class_name'],
"seed": cfg['experiment']['seed']
}
# overwrite configs for sweeps
if not fine_tune_only:
num_epochs = cfg['experiment']['num_epochs_ssl']
if args.augmentations_path is not None:
augmentations_dict = load_yaml_to_dict(args.augmentations_path)
flat_augmentations_dict = nested_to_flat_dict({"augmentations": augmentations_dict}) # need flat structure for wandb sweep to properly overwrite it
else:
flat_augmentations_dict = {}
additional_info = { # default values; may be overrided by sweep config
"ssl_framework": args.framework,
"num_epochs_ssl": num_epochs,
"num_epochs_fine_tuning": cfg['experiment']['num_epochs_fine_tuning'],
"batch_size_fine_tuning": cfg['experiment']['batch_size_fine_tuning'],
**flat_augmentations_dict,
}
experiment_info = {**experiment_info, **additional_info}
loggers_list, loggers_dict = setup_loggers(tb_dir="tb_logs", experiment_info=experiment_info, dataset=args.dataset,
experiment_id=experiment_id, experiment_config_path=args.experiment_config_path, approach=approach)
return loggers_list, loggers_dict
def run_one_experiment(args, cfg, dataset_cfg, limited_k=None):
""" Runs one experiment with settings from passed arguments and configs
Parameters
----------
args : argparse.Namespace
arguments parsed from argparse
cfg : dict
experiment configs parsed from the input yaml
dataset_cfg : dict
dataset configs parsed from the input yaml
limited_k : int
Only for semi-sup: number of training data instances per class available for training
Returns
-------
Dictionary with metrics and their values
"""
experiment_id = generate_experiment_id()
if args.supervised:
approach = 'supervised'
else:
approach = 'ssl'
if args.cross_subject_cv:
approach += '_cscv'
if args.semi_sup:
approach += '_semi_sup'
loggers_list, loggers_dict = init_loggers(args, cfg, experiment_id, fine_tune_only=False, approach=approach)
### ssl full pre-training + fine_tuning
if not (args.supervised or args.fine_tuning):
encoder, cfg = ssl_pre_training(args, cfg, dataset_cfg, experiment_id, loggers_list, loggers_dict)
result_metrics = fine_tuning(args, cfg, dataset_cfg, encoder, loggers_list, loggers_dict, experiment_id, limited_k=limited_k)
### fine-tuning or supervised training
else:
model_cfg = cfg['model'][args.model]
model_cfg['kwargs'] = {**dataset_cfg, **model_cfg['kwargs']}
if args.fine_tuning:
pre_trained_model = init_ssl_pretrained(model_cfg, args.fine_tuning_ckpt_path, cfg['model']['ssl']['kwargs']['projection_hidden'], dataset_cfg[args.dataset]['n_classes'])
encoder = getattr(pre_trained_model, 'encoder')
elif args.supervised:
encoder = init_encoder(model_cfg)
ft = not args.supervised
result_metrics = fine_tuning(args, cfg, dataset_cfg, encoder, loggers_list, loggers_dict, experiment_id, ft=ft, limited_k=limited_k)
return result_metrics
def validate_args(args):
if args.fine_tuning and not (args.fine_tuning_ckpt_path or args.fine_tuning_ckpt_paths):
print("Need to provide --fine_tuning_ckpt_path if running with --fine_tuning!")
exit(1)
def main():
# parse cli arguments and configs
args = parse_arguments()
validate_args(args)
cfg = load_yaml_to_dict(args.experiment_config_path)
dataset_cfg = load_yaml_to_dict(args.dataset_config_path)
if args.supervised:
args.framework = 'supervised'
# cross-subject cross-validation
if args.cross_subject_cv:
for i in range(1, args.num_folds + 1):
fold_path = os.path.join(dataset_cfg[args.dataset]['cross-subject'], 'fold{}'.format(i))
dataset_cfg[args.dataset]['train'] = os.path.join(fold_path, 'train')
dataset_cfg[args.dataset]['val'] = os.path.join(fold_path, 'val')
dataset_cfg[args.dataset]['test'] = os.path.join(fold_path, 'test')
if args.fine_tuning:
args.fine_tuning_ckpt_path = args.fine_tuning_ckpt_paths[i - 1]
print(dataset_cfg)
run_one_experiment(args, cfg, dataset_cfg)
# semi-supervised learning scenarios
if args.semi_sup:
results = {}
for k in [1, 2, 5, 10, 25, 50, 100]:
results[str(k)] = {}
for i in range(args.semi_sup_runs):
results[str(k)]['trial_{}'.format(i)] = {}
metrics = run_one_experiment(args, cfg, dataset_cfg, limited_k=k)
results[str(k)]['trial_{}'.format(i)][args.framework] = metrics['test_f1-score']
dict_to_json(results, args.semi_sup_results_path + '_' + args.framework + '.json')
# single pre-training and fine-tuning experiment (feature representation learning -- when fine-tuning is done on the whole train set)
else:
run_one_experiment(args, cfg, dataset_cfg)
if __name__ == '__main__':
main()
| 17,219 | 45.540541 | 218 | py |
csshar_tfa | csshar_tfa-main/callbacks/log_confusion_matrix.py | import pytorch_lightning as pl
import pytorch_lightning.loggers as loggers
import wandb
class LogConfusionMatrix(pl.Callback):
"""
A callback which caches all labels and predictions encountered during a testing epoch,
then logs a confusion matrix to WandB at the end of the test.
"""
def __init__(self, class_names):
self.class_names = class_names
self._reset_state()
def _reset_state(self):
self.labels = []
self.preds = []
def on_test_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self._reset_state()
def on_test_batch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", outputs, batch, batch_idx, dataloader_idx) -> None:
self.labels += (batch[1]).tolist()
self.preds += outputs['preds'].tolist()
def on_test_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
# Retrieve the WandB logger, if it exists.
wandb_logger = None
for logger in trainer.logger:
if isinstance(logger, loggers.WandbLogger):
wandb_logger = logger
if wandb_logger == None:
return
# Log the confusion matrix.
confusion_matrix = wandb.plot.confusion_matrix(
y_true = self.labels,
preds = self.preds,
class_names = self.class_names
)
wandb_logger.experiment.log({
"confusion_matrix": confusion_matrix,
"global_step": trainer.global_step
})
| 1,562 | 34.522727 | 139 | py |
csshar_tfa | csshar_tfa-main/callbacks/log_classifier_metrics.py | import pytorch_lightning as pl
from torch import nn
import torch
import torchmetrics
class LogClassifierMetrics(pl.Callback):
"""
A callback which logs one or more classifier-specific metrics at the end of each
validation and test epoch, to all available loggers.
The available metrics are: accuracy, precision, recall, F1-score.
"""
def __init__(self, num_classes, metric_names=['accuracy', 'f1-score', 'precision', 'recall'], average='micro'):
self.metric_names = metric_names
self.metric_dict = nn.ModuleDict({
'accuracy': torchmetrics.Accuracy(),
'f1-score': torchmetrics.F1(num_classes=num_classes, average=average),
'precision': torchmetrics.Precision(num_classes=num_classes, average=average),
'recall': torchmetrics.Recall(num_classes=num_classes, average=average)
})
self._reset_state()
def _reset_state(self):
self.labels = []
self.preds = []
def on_test_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self._reset_state()
def on_validation_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self._reset_state()
def _cache_preds_labels(self, outputs, batch):
self.labels += (batch[1]).tolist()
self.preds += outputs['preds'].tolist()
def on_validation_batch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", outputs, batch, batch_idx, dataloader_idx) -> None:
self._cache_preds_labels(outputs, batch)
def on_test_batch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", outputs, batch, batch_idx, dataloader_idx) -> None:
self._cache_preds_labels(outputs, batch)
def _shared_eval(self, trainer, prefix):
labels_tensor = torch.Tensor(self.labels).int()
preds_tensor = torch.Tensor(self.preds).int()
for metric_name in self.metric_names:
if metric_name in self.metric_dict:
metric_val = self.metric_dict[metric_name](preds_tensor, labels_tensor)
self.log(f"{prefix}_{metric_name}", metric_val)
def on_validation_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self._shared_eval(trainer, "val")
def on_test_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self._shared_eval(trainer, "test")
| 2,465 | 43.035714 | 145 | py |
csshar_tfa | csshar_tfa-main/models/attention_lstm.py | import numpy as np
from torch import nn
import torch
import torch.nn.functional as F
from .mlp import ProjectionMLP_SimCLR, SimSiamMLP
class AttnLSTM(nn.Module):
def __init__(self,
input_dim,
hidden_dim,
output_dim,
n_layers=1,
sensor_attention=False,
temporal_attention=False,
return_weights=False,
norm_out=False,
get_lstm_features=False,
initialize_lstm=False,
fc_size=256,
supervised=True,
framework = 'SimCLR'):
super(AttnLSTM, self).__init__()
self.name = 'attention_lstm'
self.framework = framework
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.return_weights = return_weights
self.sensor_attention = sensor_attention
self.temporal_attention = temporal_attention
self.n_layers = n_layers
self.supervised = supervised
if sensor_attention:
self.sens_attn_layer = SensorAttention(self.input_dim)
self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.n_layers)
if temporal_attention:
self.temp_attn_layer = TemporalAttention(self.hidden_dim)
if self.supervised:
self.linear = nn.Linear(self.hidden_dim, self.output_dim)
else:
if self.framework == 'SimCLR':
self.projection = ProjectionMLP_SimCLR(self.hidden_dim, fc_size)
elif self.framework == 'SimSiam':
self.projection = ProjectionMLP_SimSiam(self.hidden_dim, fc_size)
self.prediction = PredictionMLP(fc_size, int(fc_size / 2))
self.norm_out = norm_out
self.get_lstm_features = get_lstm_features
self.initialize_lstm = initialize_lstm
def forward(self, x, projection_head=True, prediction=False, hidden=None):
if self.sensor_attention:
x, sensor_weights = self.sens_attn_layer(x)
else:
sensor_weights = None
if self.initialize_lstm:
hidden_states, _ = self.lstm(x, hidden)
else:
hidden_states, _ = self.lstm(x)
if self.temporal_attention:
hidden_states, temporal_weights = self.temp_attn_layer(hidden_states)
else:
hidden_states = hidden_states[-1]
temporal_weights = None
if self.supervised:
x = self.linear(hidden_states)
elif projection_head:
x = self.projection(hidden_states)
if prediction:
x = self.prediction(x)
else:
x = hidden_states
if self.norm_out:
x = F.normalize(x, p=2, dim=1)
if self.return_weights:
return x, sensor_weights, temporal_weights
return x
class TemporalAttention(nn.Module):
def __init__(self, input_dim):
super(TemporalAttention, self).__init__()
self.linear_s = nn.Linear(input_dim, input_dim)
def forward(self, hidden_states):
scores = []
for i, hidden in enumerate(hidden_states):
int_vector = self.linear_s(hidden)
tmp_score = torch.bmm(hidden_states[-1].unsqueeze(1), int_vector.unsqueeze(2))
scores.append(tmp_score.squeeze())
scores = torch.stack(scores)
weights = F.softmax(scores, dim=0).unsqueeze(-1)
hidden_out = torch.sum(torch.mul(weights, hidden_states), dim=0)
return hidden_out, weights.squeeze().permute(1, 0)
class SensorAttention(nn.Module):
def __init__(self, input_dim):
super(SensorAttention, self).__init__()
self.input_dim = input_dim
self.linear_x = nn.Linear(input_dim, input_dim)
self.linear_b = nn.Linear(input_dim, input_dim)
self.linear_e = nn.Linear(input_dim, input_dim)
def forward(self, sensor_data):
new_signals_arr = []
beta_arr = []
batch_size = sensor_data.shape[1]
tmp_beta = torch.zeros(batch_size, self.input_dim).float().cuda()
for tmp_signal_batch in sensor_data:
signal_out = self.linear_x(tmp_signal_batch)
beta_out = self.linear_b(tmp_beta)
merged = torch.tanh(signal_out + beta_out)
energy = self.linear_e(merged)
tmp_beta = F.softmax(energy, dim=1)
new_signal = torch.mul(tmp_beta, tmp_signal_batch)
beta_arr.append(tmp_beta)
new_signals_arr.append(new_signal)
new_signals = torch.stack(new_signals_arr).cuda()
sens_weights = torch.stack(beta_arr).cuda()
return new_signals, sens_weights.permute(1, 0, 2) | 3,995 | 29.738462 | 81 | py |
csshar_tfa | csshar_tfa-main/models/simclr.py | import torch
import torch.nn.functional as F
from pytorch_lightning.core.lightning import LightningModule
from torch import nn
from apex.parallel.LARC import LARC
class SimCLR(LightningModule):
def __init__(self,
encoder,
projection,
ssl_batch_size=128,
temperature=0.05,
n_views=2,
optimizer_name_ssl='lars',
ssl_lr=0.001,
**kwargs):
super().__init__()
self.encoder = encoder
self.projection = projection
self.optimizer_name_ssl = optimizer_name_ssl
self.lr = ssl_lr
self.loss = NTXent(ssl_batch_size, n_views, temperature)
self.log_hyperparams()
def log_hyperparams(self):
self.hparams['in_channels'] = self.encoder.in_channels
self.hparams['out_channels'] = self.encoder.out_channels
self.hparams['num_head'] = self.encoder.num_head
self.hparams['num_layers'] = self.encoder.num_layers
self.hparams['kernel_size'] = self.encoder.kernel_size
self.hparams['dropout'] = self.encoder.dropout
self.save_hyperparameters(ignore=["batch_size", "n_views"])
def _prepare_batch(self, batch):
batch = torch.cat(batch, dim=0)
if self.encoder.name == 'transformer':
batch = batch.permute(0, 2, 1)
batch = batch.float()
return batch
def forward(self, x):
x = self.encoder(x)
x = nn.Flatten()(x)
x = self.projection(x)
return x
def training_step(self, batch, batch_idx):
batch = self._prepare_batch(batch)
out = self(batch)
loss, pos, neg = self.loss(out)
self.log('ssl_train_loss', loss)
self.log("avg_positive_sim", pos)
self.log("avg_neg_sim", neg)
return loss
def validation_step(self, batch, batch_idx):
batch = self._prepare_batch(batch)
out = self(batch)
loss, _, _ = self.loss(out)
self.log("ssl_val_loss", loss)
def configure_optimizers(self):
return self._initialize_optimizer()
def _initialize_optimizer(self):
if self.optimizer_name_ssl.lower() == 'adam':
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=10)
return {
"optimizer": optimizer,
"lr_scheduler": {
"scheduler": scheduler,
"monitor": 'ssl_train_loss'
}
}
elif self.optimizer_name_ssl.lower() == 'lars':
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
optimizer = LARC(optimizer)
return {
"optimizer": optimizer
}
class NTXent(LightningModule):
def __init__(self, batch_size, n_views=2, temperature=0.1):
super().__init__()
self.batch_size = batch_size
self.n_views = n_views
self.temperature = temperature
self.criterion = nn.CrossEntropyLoss()
def forward(self, x):
logits, labels, pos, neg = self.get_infoNCE_logits_labels(x, self.batch_size, self.n_views, self.temperature)
return self.criterion(logits, labels), pos, neg
def get_infoNCE_logits_labels(self, features, batch_size, n_views=2, temperature=0.1):
"""
Implementation from https://github.com/sthalles/SimCLR/blob/master/simclr.py
"""
# creates a vector with labels [0, 1, 2, 0, 1, 2]
labels = torch.cat([torch.arange(batch_size) for i in range(n_views)], dim=0)
# creates matrix where 1 is on the main diagonal and where indexes of the same intances match (e.g. [0, 4][1, 5] for batch_size=3 and n_views=2)
labels = (labels.unsqueeze(0) == labels.unsqueeze(1)).float()
# computes similarity matrix by multiplication, shape: (batch_size * n_views, batch_size * n_views)
similarity_matrix = get_cosine_sim_matrix(features)
# discard the main diagonal from both: labels and similarities matrix
mask = torch.eye(labels.shape[0], dtype=torch.bool)#.to(self.args.device)
# mask out the main diagonal - output has one column less
labels = labels[~mask].view(labels.shape[0], -1)
similarity_matrix_wo_diag = similarity_matrix[~mask].view(similarity_matrix.shape[0], -1)
# select and combine multiple positives
positives = similarity_matrix_wo_diag[labels.bool()].view(labels.shape[0], -1)
# select only the negatives
negatives = similarity_matrix_wo_diag[~labels.bool()].view(similarity_matrix_wo_diag.shape[0], -1)
# reshuffles values in each row so that positive similarity value for each row is in the first column
logits = torch.cat([positives, negatives], dim=1)
# labels is a zero vector because all positive logits are in the 0th column
labels = torch.zeros(logits.shape[0])
logits = logits / temperature
return logits, labels.long().to(logits.device), positives.mean(), negatives.mean()
def get_cosine_sim_matrix(features):
features = F.normalize(features, dim=1)
similarity_matrix = torch.matmul(features, features.T)
return similarity_matrix | 5,350 | 37.496403 | 153 | py |
csshar_tfa | csshar_tfa-main/models/supervised.py | from pandas import lreshape
import torch
import torch.nn as nn
from pytorch_lightning.core.lightning import LightningModule
class SupervisedModel(LightningModule):
def __init__(self,
encoder,
classifier,
fine_tuning=False,
optimizer_name='adam',
metric_scheduler='f1-score',
lr=0.001):
super().__init__()
self.save_hyperparameters('optimizer_name', 'lr')
self.encoder = encoder
self.classifier = classifier
self.fine_tuning = fine_tuning
if self.fine_tuning:
for param in self.encoder.parameters():
param.requires_grad = False
self.loss = nn.CrossEntropyLoss()
self.metric_scheduler = metric_scheduler
self.lr = lr
self.optimizer_name = optimizer_name
self.log_hyperparams()
def log_hyperparams(self):
self.hparams['in_channels'] = self.encoder.in_channels
self.hparams['out_channels'] = self.encoder.out_channels
self.hparams['kernel_size'] = self.encoder.kernel_size
# log hyperparameters related to the transformer model only
if self.encoder.name == 'transformer':
self.hparams['num_head'] = self.encoder.num_head
self.hparams['num_layers'] = self.encoder.num_layers
self.hparams['dropout'] = self.encoder.dropout
self.save_hyperparameters("optimizer_name", "lr")
def forward(self, x):
x = self.encoder(x)
x = nn.Flatten()(x)
x = self.classifier(x)
return x
def _prepare_batch(self, batch):
x = batch[0]
y = batch[1].long()
if self.encoder.name in ['cnn1d', 'transformer']:
x = x.permute(0, 2, 1)
x = x.float()
return x, y
def training_step(self, batch, batch_idx):
x, y = self._prepare_batch(batch)
out = self(x)
loss = self.loss(out, y)
self.log("train_loss", loss)
return loss
def validation_step(self, batch, batch_idx):
return self._shared_eval(batch, batch_idx, "val")
def test_step(self, batch, batch_idx):
return self._shared_eval(batch, batch_idx, "test")
def _shared_eval(self, batch, batch_idx, prefix):
x, y = self._prepare_batch(batch)
out = self(x)
preds = torch.argmax(out, dim=1)
loss = self.loss(out, y)
self.log(f"{prefix}_loss", loss)
return {f"{prefix}_loss": loss, "preds": preds}
def configure_optimizers(self):
return self._initialize_optimizer()
def _initialize_optimizer(self):
### Add LR Schedulers
if self.optimizer_name.lower() == 'adam':
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=10)
return {
"optimizer": optimizer,
"lr_scheduler": {
"scheduler": scheduler,
"monitor": '_'.join(['val', self.metric_scheduler])
}
}
| 3,129 | 30.938776 | 110 | py |
csshar_tfa | csshar_tfa-main/models/conv_net.py | import torch.nn as nn
class CNN1D(nn.Module):
def __init__(self,
in_channels,
len_seq=30,
out_channels=[32, 64, 128],
fc_size=256,
kernel_size=3,
stride=1,
padding=1,
pool_padding=0,
pool_size=2,
supervised=True,
relu_type = 'leaky',
**kwargs):
"""
1D-Convolutional Network
"""
super(CNN1D, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.name = 'cnn1d'
self.pool = nn.MaxPool1d(kernel_size=pool_size, stride=None)
self.num_layers = len(out_channels)
if relu_type == 'leaky':
self.relu = nn.LeakyReLU()
else:
self.relu = nn.ReLU()
self.conv1 = nn.Conv1d(in_channels=in_channels, out_channels=out_channels[0], kernel_size=kernel_size, stride=stride, padding=padding)
self.batchNorm1 = nn.BatchNorm1d(out_channels[0])
self.conv2 = nn.Conv1d(in_channels=out_channels[0], out_channels=out_channels[1], kernel_size=kernel_size, stride=stride, padding=padding)
self.batchNorm2 = nn.BatchNorm1d(out_channels[1])
self.conv3 = nn.Conv1d(in_channels=out_channels[1], out_channels=out_channels[2], kernel_size=kernel_size, stride=stride, padding=padding)
self.batchNorm3 = nn.BatchNorm1d(out_channels[2])
self.out_size = self._compute_out_size(len_seq, padding, kernel_size, stride, 3, out_channels[-1], pool_size, pool_padding)
@staticmethod
def _compute_out_size(sample_length, padding, kernel_size, stride, num_layers, num_channels, pool_size, pool_padding):
conv_out_size = sample_length
for _ in range(num_layers):
conv_out_size = int((conv_out_size + 2 * padding - (kernel_size - 1) - 1) / stride + 1)
# conv_out_size = int((conv_out_size + 2 * pool_padding - (pool_size - 1) - 1) / pool_size + 1)
return int(num_channels * conv_out_size)
def forward(self, x, skip_last_fc=False):
x = self.relu(self.batchNorm1(self.conv1(x)))
# x = self.pool(x)
x = self.relu(self.batchNorm2(self.conv2(x)))
# x = self.pool(x)
x = self.relu(self.batchNorm3(self.conv3(x)))
# x = self.pool(x)
return x | 2,442 | 39.716667 | 146 | py |
csshar_tfa | csshar_tfa-main/models/mlp.py | import torch
import torch.nn as nn
class MLP(nn.Module):
def __init__(self, in_size, out_size, hidden=[256, 128], relu_type='leaky'):
super().__init__()
self.name = 'MLP'
if relu_type == 'leaky':
self.relu = nn.LeakyReLU(inplace=True)
else:
self.relu = nn.ReLU(inplace=True)
self.linear1 = nn.Sequential(
nn.Linear(in_size, hidden[0]),
nn.BatchNorm1d(hidden[0]),
self.relu
)
self.linear2 = nn.Sequential(
nn.Linear(hidden[0], hidden[1]),
nn.BatchNorm1d(hidden[1]),
self.relu
)
self.output = nn.Linear(hidden[1], out_size)
def forward(self, x):
x = self.linear1(x)
x = self.linear2(x)
x = self.output(x)
return x
class LinearClassifier(nn.Module):
def __init__(self, in_size, out_size):
super().__init__()
self.name = 'LinearClassifier'
self.classifier = nn.Linear(in_size, out_size)
def forward(self, x):
x = self.classifier(x)
return x
class ProjectionMLP(nn.Module):
def __init__(self, in_size, fc_size, out_size):
super().__init__()
self.out_size = out_size
self.layer1 = nn.Sequential(
nn.Linear(in_size, fc_size),
nn.ReLU(inplace=True)
)
self.layer2 = nn.Linear(fc_size, out_size)
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
return x
class MLPDropout(nn.Module):
def __init__(self, in_size, out_size, hidden=[256, 128]):
super(MLPDropout, self).__init__()
self.name = 'MLP'
self.relu = nn.ReLU()
self.linear1 = nn.Sequential(
nn.Linear(in_size, hidden[0]),
nn.ReLU(inplace=True),
nn.Dropout(0.2)
)
self.linear2 = nn.Sequential(
nn.Linear(hidden[0], hidden[1]),
nn.ReLU(inplace=True),
nn.Dropout(0.2)
)
self.output = nn.Linear(hidden[1], out_size)
def forward(self, x):
x = self.linear1(x)
x = self.linear2(x)
x = self.output(x)
return x
| 2,209 | 25.626506 | 80 | py |
csshar_tfa | csshar_tfa-main/models/dtw.py | import torch
import torch.nn.functional as F
from apex.parallel.LARC import LARC
from libraries.pytorch_softdtw_cuda.soft_dtw_cuda import SoftDTW
from pytorch_lightning.core.lightning import LightningModule
from torch import nn
from models.simclr import NTXent
class DTWModule(LightningModule):
"""
Implementation of DTW
"""
def __init__(self, encoder, projection, optimizer_name_ssl='adam', ssl_lr=0.001, alpha=0.1, beta=0.1, gamma=0.5, l=2, sigma=15, n_views=2, ssl_batch_size=64, temperature=0.1, **kwargs):
super().__init__()
# encoder class
self.encoder = encoder
self.projection = projection
# optimization
self.optimizer_name_ssl = optimizer_name_ssl
self.lr = ssl_lr
# DTW-related parameters
self.alpha = alpha
self.l = l
self.sigma = sigma
# Losses
self.dtw_loss = SoftDTW(use_cuda=True, normalize=False, gamma=gamma)
self.ntxent_loss = NTXent(batch_size=ssl_batch_size, n_views=n_views, temperature=temperature)
self.log_hyperparams()
def log_hyperparams(self):
self.hparams['in_channels'] = self.encoder.in_channels
self.hparams['out_channels'] = self.encoder.out_channels
self.hparams['num_head'] = self.encoder.num_head
self.hparams['num_layers'] = self.encoder.num_layers
self.hparams['kernel_size'] = self.encoder.kernel_size
self.hparams['dropout'] = self.encoder.dropout
self.hparams['embedding_size'] = self.projection.out_size
self.save_hyperparameters(ignore=["batch_size", "n_views"])
def _prepare_batch(self, batch):
batch = torch.cat(batch, dim=0)
if self.encoder.name == 'transformer':
batch = batch.permute(0, 2, 1)
batch = batch.float()
return batch
def forward(self, x):
temporal = self.encoder(x)
flattened = nn.Flatten()(temporal)
projected = self.projection(flattened)
return projected, temporal
def training_step(self, batch, batch_idx):
# preprocess batch
batch = self._prepare_batch(batch)
# pass the batch through the model
projected, temporal = self(batch)
first, second = temporal.split(int(temporal.shape[0]/2), dim=0)
# normalize temporal embeddings
first = F.normalize(first, dim=2)
second = F.normalize(second, dim=2)
# compute losses
dtw_loss = self.dtw_loss(first, second).mean()
nt_xent_loss, pos, neg = self.ntxent_loss(projected)
loss = nt_xent_loss + self.alpha * dtw_loss
# log losses and average similarities
self.log("dtw_train_loss", dtw_loss)
self.log("nt_xent_loss", nt_xent_loss)
self.log("avg_positive_sim", pos)
self.log("avg_neg_sim", neg)
self.log("ssl_train_loss", loss)
return loss
def validation_step(self, batch, batch_idx):
batch = self._prepare_batch(batch)
projected, temporal = self(batch)
first, second = temporal.split(int(temporal.shape[0]/2), dim=0)
first = F.normalize(first, dim=2)
second = F.normalize(second, dim=2)
dtw_loss = self.dtw_loss(first, second).mean()
nt_xent_loss, pos, neg = self.ntxent_loss(projected)
loss = nt_xent_loss + self.alpha * dtw_loss
self.log("dtw_val_loss", dtw_loss)
self.log("nt_xent_val_loss", nt_xent_loss)
self.log("ssl_val_loss", loss)
return loss
def configure_optimizers(self):
return self._initialize_optimizer()
def _initialize_optimizer(self):
if self.optimizer_name_ssl.lower() == 'adam':
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=10)
return {
"optimizer": optimizer,
"lr_scheduler": {
"scheduler": scheduler,
"monitor": 'ssl_train_loss'
}
}
elif self.optimizer_name_ssl.lower() == 'lars':
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
optimizer = LARC(optimizer)
return {
"optimizer": optimizer
}
| 4,302 | 36.417391 | 189 | py |
csshar_tfa | csshar_tfa-main/models/vanilla_lstm.py | from torch import nn
class VanillaLSTM(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim, n_layers=1, norm_out=False, get_lstm_features=False, initialize_lstm=False):
super(VanillaLSTM, self).__init__()
self.name = 'vanilla_lstm'
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.n_layers = n_layers
self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.n_layers)
self.linear = nn.Linear(self.hidden_dim, output_dim)
self.norm_out = norm_out
self.get_lstm_features = get_lstm_features
self.initialize_lstm = initialize_lstm
def forward(self, x, hidden=None):
if self.initialize_lstm:
lstm_out, _ = self.lstm(x, hidden)
else:
lstm_out, _ = self.lstm(x)
out = self.linear(lstm_out[-1])
if self.norm_out:
norm = out.norm(p=2, dim=1, keepdim=True)
out = out.div(norm)
if self.get_lstm_features:
return out, lstm_out[-1]
else:
return out | 921 | 30.793103 | 131 | py |
csshar_tfa | csshar_tfa-main/models/cae.py | import torch
import torch.nn as nn
from models.transformer import ConvLayers, PositionalEncoding, TransformerEncoderLayerWeights, TransformerEncoderWeights
class Encoder(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, pooling_kernel, pooling_padding):
super(Encoder, self).__init__()
self.cnn_block1 = nn.Sequential(
nn.Conv1d(in_channels=in_channels, out_channels=out_channels[0], kernel_size=kernel_size, stride=stride, padding=padding),
# nn.BatchNorm1d(out_channels[0]),
nn.ReLU(inplace=True),
nn.MaxPool1d(pooling_kernel)
)
self.cnn_block2 = nn.Sequential(
nn.Conv1d(in_channels=out_channels[0], out_channels=out_channels[1], kernel_size=kernel_size, stride=stride, padding=padding),
# nn.BatchNorm1d(out_channels[1]),
nn.ReLU(inplace=True),
nn.MaxPool1d(pooling_kernel)
)
self.cnn_block3 = nn.Sequential(
nn.Conv1d(in_channels=out_channels[1], out_channels=out_channels[2], kernel_size=kernel_size, stride=stride, padding=padding),
# nn.BatchNorm1d(out_channels[2]),
nn.ReLU(inplace=True),
nn.MaxPool1d(pooling_kernel)
)
self.flatten = nn.Flatten()
def forward(self, x):
x = self.cnn_block1(x)
x = self.cnn_block2(x)
x = self.cnn_block3(x)
return self.flatten(x)
class Decoder(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=1, upsample=True):
super(Decoder, self).__init__()
padding = int(kernel_size / 2)
self.out_channels = out_channels
if upsample:
self.decnn_block1 = nn.Sequential(
nn.ConvTranspose1d(in_channels=out_channels[2], out_channels=out_channels[1], kernel_size=kernel_size, stride=stride, padding=padding),
nn.ReLU(inplace=True),
nn.Upsample(7)
)
self.decnn_block2 = nn.Sequential(
nn.ConvTranspose1d(in_channels=out_channels[1], out_channels=out_channels[0], kernel_size=kernel_size, stride=stride, padding=padding),
nn.ReLU(inplace=True),
nn.Upsample(15)
)
self.decnn_block3 = nn.Sequential(
nn.ConvTranspose1d(in_channels=out_channels[0], out_channels=in_channels, kernel_size=kernel_size, stride=stride, padding=padding),
nn.ReLU(inplace=True),
nn.Upsample(30)
)
else:
self.decnn_block1 = nn.Sequential(
nn.ConvTranspose1d(in_channels=out_channels[2], out_channels=out_channels[1], kernel_size=kernel_size, stride=stride, padding=padding),
nn.ReLU(inplace=True)
)
self.decnn_block2 = nn.Sequential(
nn.ConvTranspose1d(in_channels=out_channels[1], out_channels=out_channels[0], kernel_size=kernel_size, stride=stride, padding=padding),
nn.ReLU(inplace=True)
)
self.decnn_block3 = nn.Sequential(
nn.ConvTranspose1d(in_channels=out_channels[0], out_channels=in_channels, kernel_size=kernel_size, stride=stride, padding=padding),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = x.view(x.shape[0], self.out_channels[-1], -1)
x = self.decnn_block1(x)
x = self.decnn_block2(x)
x = self.decnn_block3(x)
return x
class Bottleneck(nn.Module):
def __init__(self, conv_out_size, latent_size):
super(Bottleneck, self).__init__()
self.linear1 = nn.Linear(conv_out_size, latent_size)
self.linear2 = nn.Linear(latent_size, conv_out_size)
def forward(self, x, return_features=False):
x = self.linear1(x)
if not return_features:
x = self.linear2(x)
return x
class Autoencoder(nn.Module):
def __init__(self, in_channels, out_channels, latent_size, kernel_size=3, stride=1, padding=1, pooling_kernel=2, pooling_padding=0, len_seq=30, supervised=False, return_attention=False):
super(Autoencoder, self).__init__()
self.name = 'cae'
self.supervised = supervised
self.num_layers = len(out_channels)
self.encoder = Encoder(in_channels, out_channels, kernel_size, stride, padding, pooling_kernel, pooling_padding)
conv_out_size = len_seq
for _ in range(self.num_layers):
conv_out_size = int((conv_out_size + 2 * padding - (kernel_size - 1) - 1) / stride + 1)
conv_out_size = int((conv_out_size + 2 * pooling_padding - (pooling_kernel - 1) - 1) / pooling_kernel + 1)
conv_out_size = int(out_channels[-1] * conv_out_size)
self.bottleneck = Bottleneck(conv_out_size, latent_size)
self.decoder = Decoder(in_channels, out_channels, kernel_size, stride, padding, upsample=True)
def forward(self, x, return_features=False):
x = self.encoder(x)
x = self.bottleneck(x, return_features)
if not return_features:
x = self.decoder(x)
return x
class TransformerEncoder(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, num_head=8, num_layers=6, max_len=30, dropout=0.1, return_attention=False):
super(TransformerEncoder, self).__init__()
self.cnn = ConvLayers(in_channels, out_channels, kernel_size=kernel_size)
self.positional_encoding = PositionalEncoding(d_model=out_channels[-1], dropout=dropout, max_len=max_len)
self.return_attention = return_attention
if return_attention:
self.encoder_layer = TransformerEncoderLayerWeights(d_model=out_channels[-1], nhead=num_head)
self.transformer_encoder = TransformerEncoderWeights(self.encoder_layer, num_layers=num_layers)
else:
self.encoder_layer = nn.TransformerEncoderLayer(d_model=out_channels[-1], nhead=num_head)
self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=num_layers)
self.flatten = nn.Flatten()
def forward(self, x):
x = self.cnn(x)
x = self.positional_encoding(x)
if self.return_attention:
x, attention_maps = self.transformer_encoder(x)
else:
x = self.transformer_encoder(x)
x = x.permute(1, 2, 0)
if self.return_attention:
return self.flatten(x), attention_maps
else:
return self.flatten(x)
class TransformerAutoencoder(nn.Module):
def __init__(self, in_channels, len_seq, out_channels, num_head, num_layers, latent_size, dropout, kernel_size=3, supervised=False, return_attention=False):
super(TransformerAutoencoder, self).__init__()
self.name = 'transformer_cae'
self.supervised = supervised
self.num_layers = len(out_channels)
self.return_attention = return_attention
self.encoder = TransformerEncoder(in_channels, out_channels, kernel_size, num_head, num_layers, len_seq, dropout, return_attention=return_attention)
conv_out_size = int(out_channels[-1] * len_seq)
self.bottleneck = Bottleneck(conv_out_size, latent_size)
self.decoder = Decoder(in_channels, out_channels, kernel_size, upsample=False)
def forward(self, x, return_features=False):
if self.return_attention:
x, attention_maps = self.encoder(x)
else:
x = self.encoder(x)
x = self.bottleneck(x, return_features)
if not return_features:
x = self.decoder(x)
if self.return_attention:
return x, attention_maps
else:
return x
| 6,739 | 38.186047 | 187 | py |
csshar_tfa | csshar_tfa-main/models/transformer.py | import math
from typing import Optional
import torch
import torch.nn as nn
from torch import Tensor
from pytorch_lightning.core.lightning import LightningModule
class PositionalEncoding(nn.Module):
"""
Implementation of positional encoding from https://github.com/pytorch/examples/tree/master/word_language_model
"""
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
class ConvLayers(nn.Module):
def __init__(self, in_channels, out_channels=[32, 64, 128], kernel_size=3, stride=1, sample_len=30, relu_type='leaky'):
super(ConvLayers, self).__init__()
padding = int(kernel_size / 2)
if relu_type == 'leaky':
self.relu = nn.LeakyReLU()
else:
self.relu = nn.ReLU()
self.conv1 = nn.Conv1d(in_channels=in_channels, out_channels=out_channels[0], kernel_size=kernel_size, stride=stride, padding=padding)
self.batchNorm1 = nn.BatchNorm1d(out_channels[0])
self.conv2 = nn.Conv1d(in_channels=out_channels[0], out_channels=out_channels[1], kernel_size=kernel_size, stride=stride, padding=padding)
self.batchNorm2 = nn.BatchNorm1d(out_channels[1])
self.conv3 = nn.Conv1d(in_channels=out_channels[1], out_channels=out_channels[2], kernel_size=kernel_size, stride=stride, padding=padding)
self.batchNorm3 = nn.BatchNorm1d(out_channels[2])
self.out_size = self._compute_out_size(sample_len, padding, kernel_size, stride, 3, out_channels[-1])
@staticmethod
def _compute_out_size(sample_length, padding, kernel_size, stride, num_layers, num_channels):
conv_out_size = sample_length
for _ in range(num_layers):
conv_out_size = int((conv_out_size + 2 * padding - (kernel_size - 1) - 1) / stride + 1)
return int(num_channels * conv_out_size)
def forward(self, x):
x = self.relu(self.batchNorm1(self.conv1(x)))
x = self.relu(self.batchNorm2(self.conv2(x)))
x = self.relu(self.batchNorm3(self.conv3(x)))
return x.permute(2, 0, 1)
class TransformerEncoderLayerWeights(nn.TransformerEncoderLayer):
def forward(self, src: Tensor, src_mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None) -> Tensor:
src2, attention_maps = self.self_attn(src, src, src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src, attention_maps
class TransformerEncoderWeights(nn.TransformerEncoder):
def forward(self, src: Tensor, mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None):
output = src
attention_maps_list = []
for mod in self.layers:
output, attention_maps = mod(output, src_mask=mask, src_key_padding_mask=src_key_padding_mask)
attention_maps_list.append(attention_maps)
if self.norm is not None:
output = self.norm(output)
return output, torch.stack(attention_maps_list)
class Transformer(nn.Module):
def __init__(self, in_channels, max_len, out_channels=[32, 64, 128], num_head=8, num_layers=6, kernel_size=3, dropout=0.1, return_attention=False, use_cls=False, **kwargs):
super().__init__()
self.name = 'transformer'
self.in_channels = in_channels
self.out_channels = out_channels
self.num_head = num_head
self.num_layers = num_layers
self.kernel_size = kernel_size
self.dropout = dropout
self.use_cls = use_cls
self.cnn = ConvLayers(in_channels, out_channels, kernel_size=kernel_size, sample_len=max_len)
self.positional_encoding = PositionalEncoding(d_model=out_channels[-1], dropout=dropout, max_len=max_len + int(self.use_cls))
if return_attention:
self.return_attention = True
self.encoder_layer = TransformerEncoderLayerWeights(d_model=out_channels[-1], nhead=num_head)
self.transformer_encoder = TransformerEncoderWeights(self.encoder_layer, num_layers=num_layers)
else:
self.return_attention = False
self.encoder_layer = nn.TransformerEncoderLayer(d_model=out_channels[-1], nhead=num_head)
self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=num_layers)
self.dropout = nn.Dropout(p=dropout)
self.use_cls = use_cls
if not self.use_cls:
self.out_size = self.cnn.out_size
else:
self.out_size = out_channels[-1]
def forward(self, x):
x = self.cnn(x)
if self.use_cls:
x = self._append_cls_token(x)
x = self.positional_encoding(x)
if self.return_attention:
x, attention_maps = self.transformer_encoder(x)
else:
x = self.transformer_encoder(x)
x = self.dropout(x)
if self.use_cls:
x = x[0]
else:
x = x.permute(1, 0, 2)
if self.return_attention:
return x, attention_maps
else:
return x
def _append_cls_token(self, x):
cls_batch = nn.Parameter((torch.randn(1, x.shape[1], self.out_channels[-1]))).cuda()
x = torch.cat((cls_batch, x), 0)
return x | 6,082 | 39.553333 | 176 | py |
csshar_tfa | csshar_tfa-main/datasets/sensor_torch_datamodule.py | from typing import Optional
from pytorch_lightning import LightningDataModule
from torch.utils.data.dataloader import DataLoader
from datasets.sensor_torch_dataset import SensorTorchDataset
class SensorDataModule(LightningDataModule):
def __init__(self,
train_path,
val_path,
test_path,
batch_size,
train_transforms = {},
test_transforms = {},
ssl = False,
cae=False,
n_views = 2,
num_workers = 1,
limited_k=None,
store_in_ram=True):
super().__init__()
# paths
self.train_path = train_path
self.val_path = val_path
self.test_path = test_path
# batch and transforms
self.batch_size = batch_size
self.train_transforms = train_transforms
self.test_transforms = test_transforms
# ssl related
self.ssl = ssl
self.cae = cae
self.n_views = n_views
self.num_workers = num_workers
self.limited_k = limited_k
self.store_in_ram = store_in_ram
self._init_dataloaders()
self.save_hyperparameters("batch_size", "ssl", "cae", "n_views", "limited_k")
def _init_dataloaders(self):
train_dataset = self._create_train_dataset()
test_dataset = self._create_test_dataset()
val_dataset = self._create_val_dataset()
drop_last_ssl = bool(self.ssl)
self.train = DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True, drop_last=drop_last_ssl, num_workers=self.num_workers, pin_memory=True)
self.test = DataLoader(test_dataset, batch_size=self.batch_size, shuffle=False, drop_last=False, num_workers=self.num_workers, pin_memory=True)
self.val = DataLoader(val_dataset, batch_size=self.batch_size, shuffle=False, drop_last=drop_last_ssl, num_workers=self.num_workers, pin_memory=True)
def _create_train_dataset(self):
return SensorTorchDataset(
data_path=self.train_path,
ssl=self.ssl,
transforms= self.train_transforms,
limited=True if self.limited_k is not None else False,
limited_k=self.limited_k,
instance_data=False,
cae=self.cae,
store_in_ram=self.store_in_ram
)
def _create_val_dataset(self):
if self.ssl:
val_transforms = self.train_transforms
else:
val_transforms = self.test_transforms
return SensorTorchDataset(
data_path=self.val_path,
ssl = self.ssl,
transforms=val_transforms,
cae = self.cae,
store_in_ram=self.store_in_ram
)
def _create_test_dataset(self):
return SensorTorchDataset(
data_path=self.test_path,
store_in_ram=self.store_in_ram
)
def train_dataloader(self):
return self.train
def val_dataloader(self):
return self.val
def test_dataloader(self):
return self.test
| 3,074 | 31.03125 | 160 | py |
csshar_tfa | csshar_tfa-main/datasets/mobi_act_data.py | import os
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
SCENARIOS_TO_IGNORE = {
'FOL',
'FKL',
'SDL',
'LYI',
'SLH',
'SBW',
'SLW',
'SBE',
'SRH',
'BSC'
}
MOBI_ACT_LABELS_DICT = {
'STD': 0,
'WAL': 1,
'JOG': 2,
'JUM': 3,
'STU': 4,
'STN': 5,
'SCH': 6,
'SIT': 7,
'CHU': 8,
'CSI': 9,
'CSO': 10,
'LYI': -1,
'FOL': -1,
'FKL': -1,
'BSC': -1,
'SDL': -1
}
MOBI_ACT_COLUMNS_TO_IGNORE = [
'rel_time',
'azimuth',
'pitch',
'roll'
]
class MobiActDataset():
""" A class for MobiAct dataset structure inculding paths to each subject and experiment file
Attributes:
-----------
root_dir : str
Path to the root directory of the dataset (data/mobi_act/MobiAct_Dataset_v2.0/Annotated Data/)
datafiles_dict : dict
Dictionary for storing paths to each user and experiment
"""
def __init__(self, root_dir):
self.root_dir = root_dir
self.datafiles_dict = self.get_datafiles_dict()
def get_datafiles_dict(self):
""" Get dictionary with all subjects with corresponding raw datasets
"""
scenarios = os.listdir(self.root_dir)
datafiles_dict = {}
for scenario in scenarios:
tmp_path = os.path.join(self.root_dir, scenario)
files = os.listdir(tmp_path)
for file_ in files:
tmp_subj = int(file_.split('_')[1])
tmp_exp = int(file_.split('_')[2])
if tmp_subj not in datafiles_dict:
datafiles_dict[tmp_subj] = {}
if tmp_exp not in datafiles_dict[tmp_subj]:
datafiles_dict[tmp_subj][tmp_exp] = []
datafiles_dict[tmp_subj][tmp_exp].append(os.path.join(tmp_path, file_))
return datafiles_dict
def get_files(self, subject, exp, scenario):
""" Get file for a subject
"""
file_ = [file_ for file_ in self.datafiles_dict[subject][exp] if scenario in file_]
if len(file_) == 1:
file_ = file_[0]
return file_
class MobiActInstance():
def __init__(self, data_path, columns_to_ignore=None):
self.data_path = data_path
self.user_id, self.exp_id = self.parse_user_exp()
self.data, self.labels_col = self.read_data(columns_to_ignore)
self.labels_summary = self.form_labels_df()
def read_data(self, columns_to_ignore=None):
data = pd.read_csv(self.data_path)
data = data.rename(columns={'timestamp': 'timestep'})
labels = pd.DataFrame(data['label'])
if columns_to_ignore:
data = data.drop(columns_to_ignore, axis=1)
data = data.drop(['label'], axis=1)
labels = labels.replace({'label': MOBI_ACT_LABELS_DICT})
return data, labels
def parse_user_exp(self):
""" Get user ID for current file from its path
"""
filename = os.path.split(self.data_path)[-1]
subj = int(filename.split('_')[1])
exp = int(filename.split('_')[2])
return subj, exp
def form_labels_df(self):
""" Function for forming labels summary dataframe from the columns
"""
timestep_label = pd.concat([self.data, self.labels_col], axis=1).drop([col for col in self.data.columns if col not in ['timestep', 'label']], axis=1)
min_timestep_label = timestep_label.groupby((timestep_label.label != timestep_label.label.shift()).cumsum()).min()
max_timestep_label = timestep_label.groupby((timestep_label.label != timestep_label.label.shift()).cumsum()).max()
conc = pd.concat([min_timestep_label, max_timestep_label], axis=1)
conc.columns = ['start_timestep', 'label_to_drop', 'end_timestep', 'label']
conc = conc.drop('label_to_drop', axis=1).reset_index(drop=True)
return conc
def test():
data = "data/mobi_act/MobiAct_Dataset_v2.0/Annotated Data/"
test_dataset = MobiActDataset(data)
print(test_dataset.datafiles_dict)
print('-----------------------------')
exp_id = 1
user_name = 1
scenario = 'SLW'
file_ = test_dataset.get_files(user_name, exp_id, scenario)
print(file_)
print('-----------------------------')
instance = MobiActInstance(file_)
print(instance.data)
print(instance.labels_col)
print('-----------------------------')
print(instance.labels_summary)
if __name__ == "__main__":
test() | 3,950 | 25.695946 | 151 | py |
csshar_tfa | csshar_tfa-main/datasets/pamap_data.py | import os
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
class PamapDataset():
""" A class for Pamap2 dataset structure inculding paths to each subject and experiment file
Attributes:
-----------
root_dir : str
Path to the root directory of the dataset
datafiles_dict : dict
Dictionary for storing paths to each user and experiment
"""
def __init__(self, root_dir):
self.root_dir = root_dir
self.datafiles_dict = self.get_datafiles_dict()
def get_datafiles_dict(self):
""" Get dictionary with all subjects with corresponding raw datasets
"""
filenames_protocol = os.listdir(self.root_dir + '/Protocol')
filenames_optional = os.listdir(self.root_dir + '/Optional')
file_paths_protocol = [os.path.join(self.root_dir + '/Protocol', file) for file in filenames_protocol]
file_paths_optional = [os.path.join(self.root_dir + '/Optional', file) for file in filenames_optional]
subjects = sorted(set([file.split('.')[0] for file in filenames_protocol]))
res_dict = {}
for subject in subjects:
res_dict[subject] = {}
res_dict[subject]['exp1'] = [file_path for file_path in file_paths_protocol if subject in file_path][0]
for file_path in file_paths_optional:
if subject in file_path:
res_dict[subject]['exp2'] = file_path
return res_dict
def get_files(self, subject, exp):
""" Get file for a subject
"""
return self.datafiles_dict[subject][exp]
class PamapInstance():
def __init__(self, data_path, substring_eliminate=['orient', 'temperature', 'acc2']):
self.data_path = data_path
self.user_id, self.exp_id = self.parse_user_exp()
self.data, self.labels_col = self.read_data(substring_eliminate)
self.labels_summary = self.form_labels_df()
def read_data(self, substring_eliminate):
""" Read a single PAMAP2 instance data and eliminate redundnant features
Returns two dataframes: features and labels
Attributes:
substring_eliminate (list): a list of substrings of columns names to be eliminated
"""
# Define all feature names
imu_cols = ['temperature',
'acc1_X', 'acc1_Y', 'acc1_Z',
'acc2_X', 'acc2_Y', 'acc2_Z',
'gyro_X', 'gyro_Y', 'gyro_Z',
'mg_X', 'mg_Y', 'mg_Z',
'orient1', 'orient2', 'orient3', 'orient4']
locations = ['hand', 'body', 'ankle']
df_cols = ['timestep', 'label', 'heartrate']
imu_cols_full = [location + '_' + imu_col for location in locations for imu_col in imu_cols]
df_cols.extend(imu_cols_full)
# Read datasets
df = pd.read_csv(self.data_path, sep=' ', header=None)
df.columns = df_cols
# Eliminate redundant columns
eliminate_cols = []
for substring in substring_eliminate:
eliminate_cols.extend([col for col in df_cols if substring in col])
data = df.drop(eliminate_cols, axis=1).drop('label', axis=1)
labels = pd.DataFrame(df['label'])
# Split to data and labels
return data, labels
def parse_user_exp(self):
""" Get user ID for current file from its path
"""
if 'Optional' in self.data_path:
exp = 'exp2'
else:
exp = 'exp1'
return os.path.basename(self.data_path).split('.')[0], exp
def form_labels_df(self):
""" Function for forming labels summary dataframe from the columns
"""
timestep_label = pd.concat([self.data, self.labels_col], axis=1).drop([col for col in self.data.columns if col not in ['timestep', 'label']], axis=1)
min_timestep_label = timestep_label.groupby((timestep_label.label != timestep_label.label.shift()).cumsum()).min()
max_timestep_label = timestep_label.groupby((timestep_label.label != timestep_label.label.shift()).cumsum()).max()
conc = pd.concat([min_timestep_label, max_timestep_label], axis=1)
conc.columns = ['start_timestep', 'label_to_drop', 'end_timestep', 'label']
conc = conc.drop('label_to_drop', axis=1).reset_index(drop=True)
return conc
| 4,405 | 40.566038 | 157 | py |
csshar_tfa | csshar_tfa-main/datasets/motion_sense_data.py | import os
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
ACTIVITIES_DICT = {
'dws': 0,
'jog': 1,
'sit': 2,
'std': 3,
'ups': 4,
'wlk': 5
}
MOTION_SENSE_COLUMNS_TO_IGNORE = [
'attitude.roll',
'attitude.pitch',
'attitude.yaw',
'gravity.x',
'gravity.y',
'gravity.z'
]
class MotionSenseDataset():
""" A class for Opportunity dataset structure inculding paths to each subject and experiment file
Attributes:
-----------
root_dir : str
Path to the root directory of the dataset (data/motion_sense/A_DeviceMotion_data/A_DeviceMotion_data/)
datafiles_dict : dict
Dictionary for storing paths to each user and experiment
"""
def __init__(self, root_dir):
self.root_dir = root_dir
self.datafiles_dict = self.get_datafiles_dict()
def get_datafiles_dict(self):
""" Get dictionary with all subjects with corresponding raw datasets
"""
folders = os.listdir(self.root_dir)
all_files_paths = []
datafiles_dict = {}
for folder in folders:
act, exp_id = folder.split('_')
exp_id = int(exp_id)
act_id = ACTIVITIES_DICT[act]
tmp_path = os.path.join(self.root_dir, folder)
files = os.listdir(tmp_path)
for file_ in files:
subj = file_.split('.')[0]
all_files_paths.append((os.path.join(tmp_path, file_), subj, act_id, exp_id))
for file_ in all_files_paths:
if file_[1] not in datafiles_dict:
datafiles_dict[file_[1]] = {}
if file_[2] not in datafiles_dict[file_[1]]:
datafiles_dict[file_[1]][file_[2]] = {}
datafiles_dict[file_[1]][file_[2]][file_[3]] = file_[0]
return datafiles_dict
def get_file(self, user_name, activity, experiment):
""" Get file for a subject
"""
return self.datafiles_dict[user_name][activity][experiment]
class MotionSenseInstance():
def __init__(self, data_path, columns_to_ignore=None):
self.data_path = data_path
self.user_id, self.exp_id, self.label = self.parse_userexplabel()
self.data, self.labels_col = self.read_data(columns_to_ignore)
def parse_userexplabel(self):
subject = self.data_path.split('/')[-1][:-4]
act_exp = self.data_path.split('/')[-2]
act, exp_id = act_exp.split('_')
act_id = ACTIVITIES_DICT[act]
exp_id = int(exp_id)
return subject, exp_id, act_id
def read_data(self, columns_to_ignore=None):
data = pd.read_csv(self.data_path)
data = data.iloc[:, 1:]
if columns_to_ignore:
data = data.drop(columns_to_ignore, axis=1)
labels = pd.DataFrame([self.label] * data.shape[0])
labels.columns = ['label']
return data, labels
def test():
data = "data/motion_sense/A_DeviceMotion_data/A_DeviceMotion_data/"
test_dataset = MotionSenseDataset(data)
print(test_dataset.datafiles_dict)
print('-----------------------------')
act_id = 0
exp_id = 2
user_name = 'sub_1'
file_ = test_dataset.get_file(user_name, act_id, exp_id)
print(file_)
print('-----------------------------')
instance = MotionSenseInstance(file_)
print(instance.data)
print(instance.labels_col)
if __name__ == "__main__":
test() | 3,012 | 26.390909 | 105 | py |
csshar_tfa | csshar_tfa-main/datasets/sensor_torch_dataset.py | import os
import numpy as np
import pandas as pd
import random
from torch.utils.data import Dataset
from tqdm import tqdm
class SensorTorchDataset(Dataset):
def __init__(self, data_path, get_subjects=False, subj_act=False, ignore_subject=None, column_names=None, ssl=False, transforms=None, limited=False, limited_k=1, instance_data=False, cae=False, store_in_ram=True):
super().__init__()
self.data_path = data_path
self.limited = limited
self.limited_k = limited_k
self.ignore_subject = ignore_subject
# if true the whole dataset is processed to RAM for faster training
self.store_in_ram = store_in_ram
if store_in_ram:
print("Reading CSV files of {}...".format(data_path))
self.dataframes = [pd.read_csv(file_).fillna(0.0) for i, file_ in enumerate(tqdm(self.data_files))]
print("Done")
self.len = len(self.data_files)
self.subj_act = subj_act
self.subj_act_to_id, self.id_to_subj_act = get_subject_activity_dictionaries(self.data_path)
self.act_to_id, self.id_to_act = get_activity_dictionaries(self.data_path)
self.subj_to_id, self.id_to_subj = get_subject_dictionaries(self.data_path)
self.subjects = [self.subj_to_id[int(''.join(list(filter(str.isdigit, os.path.basename(file_).split('_')[0]))))] for file_ in self.data_files]
self.activities = [self.act_to_id[int(os.path.basename(file_).split('_')[2][1:])] for file_ in self.data_files]
self.get_subjects = get_subjects
self.column_names = column_names
self.ssl = ssl
self.cae = cae
self.instance_data = instance_data
if self.ssl or self.cae:
if transforms:
self.transforms = transforms
else:
raise AttributeError('Provide tranforms in order to use ssl approach')
@property
def data_files(self):
if self.ignore_subject:
return [os.path.join(self.data_path, filename) for filename in os.listdir(self.data_path) if self.ignore_subject not in filename]
elif self.limited:
files_per_activity = {}
for filename in os.listdir(self.data_path):
tmp_act = filename.split('_')[2]
tmp_path = os.path.join(self.data_path, filename)
if tmp_act in files_per_activity:
files_per_activity[tmp_act].append(tmp_path)
else:
files_per_activity[tmp_act] = [tmp_path]
data_files = []
for activity in files_per_activity:
data_files.extend(random.sample(files_per_activity[activity], self.limited_k))
return data_files
else:
return [os.path.join(self.data_path, filename) for filename in os.listdir(self.data_path)]
@property
def labels(self):
if self.subj_act:
return [self.subj_act_to_id['subject' + str(self.id_to_subj[self.subjects[i]])][self.id_to_act[self.activities[i]]] for i in range(self.len)]
else:
return self.activities
def __len__(self):
return self.len
def __getitem__(self, idx):
# read file by index
if self.store_in_ram:
signals = self.dataframes[idx]
else:
signals = pd.read_csv(self.data_files[idx]).fillna(0.0)
if self.column_names:
signals_list = []
for col_name in self.column_names:
columns = [column for column in signals.columns if col_name in column]
tmp_signals = np.array(signals[columns])
signals_list.append(tmp_signals)
# get its label (activity or subject-activity)
if self.subj_act:
subject = self.subjects[idx]
act = self.activities[idx]
label = self.subj_act_to_id['subject' + str(self.id_to_subj[subject])][self.id_to_act[act]]
else:
label = self.labels[idx]
if self.column_names:
return np.stack(signals_list), label
else:
if self.ssl:
x1 = self.transforms(np.array(signals))
x2 = self.transforms(np.array(signals))
if self.instance_data:
return np.array(signals), x1, x2
else:
return x1, x2
elif self.cae:
return self.transforms(np.array(signals)), np.array(signals)
else:
if self.get_subjects:
subject = self.subjects[idx]
return np.array(signals), label, subject
else:
return np.array(signals), label
def get_activity_dictionaries(path):
""" Creates dictionaries mapping label from dataset to numbered list and vice versa
Parameters
----------
path : str
path to the folder with sampled files
"""
activities = sorted(set([int(file.split('_')[2][1:]) for file in os.listdir(path)]))
act_to_id = dict()
id_to_act = dict()
for i in range(len(activities)):
act_to_id[activities[i]] = i
id_to_act[i] = activities[i]
return act_to_id, id_to_act
def get_subject_dictionaries(path):
""" Creates dictionaries mapping subjects from dataset to numbered list and vice versa
Parameters
----------
path : str
path to the folder with sampled files
"""
subjects = sorted(set([int(''.join(list(filter(str.isdigit, file.split('_')[0])))) for file in os.listdir(path)]))
subj_to_id = dict()
id_to_subj = dict()
for i in range(len(subjects)):
subj_to_id[subjects[i]] = i
id_to_subj[i] = subjects[i]
return subj_to_id, id_to_subj
def get_subject_activity_dictionaries(path, exclude_subject=''):
activities = sorted(set([int(file.split('_')[2][1:]) for file in os.listdir(path)]))
subjects = sorted(set([file.split('_')[0] for file in os.listdir(path)]))
if exclude_subject in subjects:
subjects.remove(exclude_subject)
idx = 0
subj_act_to_id = dict()
id_to_subj_act = dict()
for subject in subjects:
subj_act_to_id[subject] = dict()
for activity in activities:
subj_act_files = [file_ for file_ in os.listdir(path) if subject in file_ and file_.split('_')[2] == 'a' + str(activity)]
if len(subj_act_files) > 0:
subj_act_to_id[subject][activity] = idx
id_to_subj_act[idx] = subject + '_a' + str(activity)
idx += 1
return subj_act_to_id, id_to_subj_act
def test_limited(data_path, iterations=30):
for _ in range(iterations):
k = random.randint(1, 100)
limited_train_dataset = SensorTorchDataset(data_path, limited=True, limited_k=k)
activities = set(map(lambda x: int(x.split('_')[2][1:]) - 1, os.listdir(data_path)))
assert sorted(list(set(limited_train_dataset.activities))) == sorted(list(activities)), 'Not all activities present'
for activity in activities:
assert limited_train_dataset.activities.count(activity) == k, 'Activities present in different proportions'
print('All activities are present equally')
def main():
data_path_uci = "./sampled_data/uci_har/uci_smartphones/train"
test_limited(data_path_uci)
if __name__ == '__main__':
main() | 7,463 | 41.651429 | 217 | py |
csshar_tfa | csshar_tfa-main/utils/augmentation_utils.py | import numpy as np
import pandas as pd
from torchvision import transforms
class Shift():
def __init__(self, max_shift):
self.max_shift = max_shift
def __call__(self, x):
shift_len = np.random.randint(0, self.max_shift)
x = np.roll(x, shift_len, axis=0)
return x
class Jittering():
def __init__(self, sigma):
self.sigma = sigma
def __call__(self, x):
noise = np.random.normal(loc=0, scale=self.sigma, size=x.shape)
x = x + noise
return x
class Scaling():
def __init__(self, sigma):
self.sigma = sigma
def __call__(self, x):
factor = np.random.normal(loc=1., scale=self.sigma, size=(x.shape))
x = x * factor
return x
class Rotation():
def __init__(self):
pass
def __call__(self, x):
flip = np.random.choice([-1, 1], size=(x.shape))
return flip * x
class ChannelShuffle():
def __init__(self):
pass
def __call__(self, x):
rotate_axis = np.arange(x.shape[1])
np.random.shuffle(rotate_axis)
return x[:, rotate_axis]
class Permutation():
def __init__(self, max_segments=5):
self.max_segments = max_segments
def __call__(self, x):
orig_steps = np.arange(x.shape[0])
num_segs = np.random.randint(1, self.max_segments)
ret = np.zeros_like(x)
if num_segs > 1:
splits = np.array_split(orig_steps, num_segs)
warp = np.concatenate(np.random.permutation(splits)).ravel()
ret = x[warp]
else:
ret = x
return ret
augmentations_dict = {
'jittering': Jittering,
'scaling': Scaling,
'rotation': Rotation,
'permutation': Permutation,
'channel_shuffle': ChannelShuffle,
'shift': Shift
}
def compose_random_augmentations(config_dict, prob=0.5):
transforms_list = []
for key in config_dict:
if config_dict[key]['apply']:
if 'parameters' in config_dict[key]:
augmentation = augmentations_dict[key](**config_dict[key]['parameters'])
else:
augmentation = augmentations_dict[key]()
if key == ' jittering':
transforms_list.append(augmentation)
else:
transforms_list.append(transforms.RandomApply([augmentation], p=prob))
return transforms_list
| 2,407 | 23.824742 | 88 | py |
csshar_tfa | csshar_tfa-main/utils/training_utils.py | import importlib
import itertools
import os
import shutil
import torch
from models.mlp import ProjectionMLP
from models.simclr import SimCLR
from models.mlp import MLP, MLPDropout
from models.supervised import SupervisedModel
from torchvision import transforms
from pytorch_lightning import loggers
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from callbacks.log_classifier_metrics import LogClassifierMetrics
from callbacks.log_confusion_matrix import LogConfusionMatrix
from datasets.sensor_torch_datamodule import SensorDataModule
from utils.augmentation_utils import compose_random_augmentations
from utils.experiment_utils import load_yaml_to_dict
def init_transforms(random_augmentations_dict={}):
train = compose_random_augmentations(random_augmentations_dict)
# train.append(transforms.ToTensor())
train = transforms.Compose(train)
test = transforms.Compose([])
return train, test
def init_datamodule(train_path, val_path, test_path, batch_size,
train_transforms={}, test_transforms={},
ssl = False, n_views = 2, num_workers = 1, limited_k=None, store_in_ram=True):
data_module = SensorDataModule(train_path, val_path, test_path, batch_size, train_transforms = train_transforms, test_transforms = test_transforms,
ssl = ssl, n_views = n_views, num_workers = num_workers, limited_k = limited_k, store_in_ram = store_in_ram)
return data_module
def init_model(model_cfg, metric_scheduler='accuracy', ckpt_path=None):
module = importlib.import_module(f"models.{model_cfg['from_module']}")
class_ = getattr(module, model_cfg['class_name'])
if ckpt_path is None:
return class_(*model_cfg['args'], **model_cfg['kwargs'], metric_scheduler=metric_scheduler)
else:
return class_.load_from_checkpoint(ckpt_path, strict=False)
def init_ssl_pretrained(model_cfg, ckpt_path, fc_size, n_classes, ssl_batch_size=256, temperature=0.1):
encoder = init_encoder(model_cfg)
projection = ProjectionMLP(encoder.out_size, fc_size, n_classes)
class_ = SimCLR(encoder, projection, ssl_batch_size, temperature)
return class_.load_from_checkpoint(ckpt_path, encoder=encoder, mlp_in_size=encoder.out_size, strict=False)
def init_encoder(model_cfg, ckpt_path=None):
module = importlib.import_module(f"models.{model_cfg['from_module']}")
class_ = getattr(module, model_cfg['encoder_class_name'])
if ckpt_path is None:
return class_(*model_cfg['args'], **model_cfg['kwargs'])
else:
return class_.load_from_checkpoint(ckpt_path)
def init_finetuned_ckpt(model_cfg, n_classes, le=False, ckpt_path=None, mlp_do=True):
encoder = init_encoder(model_cfg)
if le:
LinearClassifier(encoder.out_size, n_classes)
elif mlp_do:
classifier = MLPDropout(encoder.out_size, n_classes)
else:
classifier = MLP(encoder.out_size, n_classes)
model = SupervisedModel(encoder=encoder, classifier=classifier)
return model.load_from_checkpoint(ckpt_path, encoder=encoder, classifier=classifier, strict=False)
def parse_splits(dataset_configs):
return dataset_configs['splits']['train'], dataset_configs['splits']['val'], dataset_configs['splits']['test']
def setup_tb_logger(dir, name):
return loggers.TensorBoardLogger(dir, name=name)
def setup_wandb_logger(experiment_info, dataset, experiment_id, entity='ssl_har', approach='supervised'):
return loggers.WandbLogger(config=experiment_info, entity=entity, project=f"{approach}-{dataset}", name=experiment_id, id=experiment_id)
def setup_loggers(logger_names=['tensorboard', 'wandb'], tb_dir=None, experiment_info=None, dataset=None,
experiment_id=None, entity='sensor_har', approach='supervised', experiment_config_path=None):
loggers = []
loggers_dict = {}
if 'tensorboard' in logger_names:
tb_logger = setup_tb_logger(tb_dir, experiment_id)
loggers.append(tb_logger)
loggers_dict['tensorboard'] = tb_logger
if 'wandb' in logger_names:
wandb_logger = setup_wandb_logger(experiment_info, dataset, experiment_id, entity, approach)
loggers.append(wandb_logger)
loggers_dict['wandb'] = wandb_logger
shutil.copy(experiment_config_path, os.path.join(wandb_logger.experiment.dir, "experiment_config.yaml"))
return loggers, loggers_dict
def setup_early_stopping_callback(metric, min_delta=0.00, patience=50, mode="min"):
return EarlyStopping(monitor=metric, min_delta=min_delta, patience=patience, verbose=False, mode=mode)
def setup_confusion_matrix_logger(class_names):
return LogConfusionMatrix(class_names)
def setup_classifier_metrics_logger(num_classes, metric_names=['accuracy', 'f1-score', 'precision', 'recall'], average='macro'):
return LogClassifierMetrics(num_classes, metric_names, average=average)
def setup_model_checkpoint_callback(model_weights_path, metric, dataset, model, experiment_id):
return ModelCheckpoint(
monitor=metric,
dirpath=os.path.join(model_weights_path, f"{dataset}-{model}-{experiment_id}"),
filename="{epoch}",
save_top_k=1,
mode="max",
save_last=True
)
def setup_model_checkpoint_callback_last(model_weights_path, dataset, model, experiment_id):
return ModelCheckpoint(
save_last=True,
dirpath=os.path.join(model_weights_path, f"{dataset}-{model}-{experiment_id}"),
filename="{epoch}"
)
def setup_callbacks(early_stopping_metric, early_stopping_mode, class_names, num_classes, no_ckpt, model_weights_path, metric, dataset, model, experiment_id):
callbacks = []
callbacks.append(setup_early_stopping_callback(early_stopping_metric, mode=early_stopping_mode))
callbacks.append(setup_confusion_matrix_logger(class_names))
callbacks.append(setup_classifier_metrics_logger(num_classes))
if not no_ckpt:
callbacks.append(setup_model_checkpoint_callback(model_weights_path, metric, dataset, model, experiment_id))
return callbacks
def setup_callbacks_ssl(no_ckpt, model_weights_path, dataset, model, experiment_id, online_eval, online_eval_args):
callbacks = []
if not no_ckpt:
callbacks.append(setup_model_checkpoint_callback_last(model_weights_path, dataset, model, experiment_id))
return callbacks
def check_sampling_cfg(model_cfg, transform_cfg):
for i, transform in enumerate(transform_cfg):
if ('transform_name' in transform
and transform['transform_name'] == 'sampling'
and transform_cfg[i]['kwargs']['size'] != model_cfg['kwargs']['sample_length']):
transform_cfg[i]['kwargs']['size'] = model_cfg['kwargs']['sample_length']
return model_cfg, transform_cfg
def flat_key_to_dict(flat_key, value):
"""
Example input: "augmentations_configs.scaling.apply", True
Example output: {'augmentations_configs': {'scaling': {'apply': True}}}
"""
tokens = flat_key.split('.')
if len(tokens) == 1:
return {flat_key: value}
last_key = tokens[-1]
last_dict = {last_key: value}
for inner_key in tokens[-2::-1]:
last_dict = {inner_key: last_dict}
return last_dict
def deep_merge_dicts(dict1, dict2):
"""
Performs a deep merge of dict1 and dict2, with leaf values from dict2 overwriting dict1 if needed.
"""
for k in set(dict1.keys()).union(dict2.keys()):
if k in dict1 and k in dict2:
if isinstance(dict1[k], dict) and isinstance(dict2[k], dict):
yield (k, dict(deep_merge_dicts(dict1[k], dict2[k])))
else:
# If one of the values is not a dict, value from second dict overrides one in first and we move on.
yield (k, dict2[k])
elif k in dict1:
yield (k, dict1[k])
else:
yield (k, dict2[k])
def flat_to_nested_dict(flat_dict):
"""
Example input:
{
"augmentations.scaling.apply": True,
"augmentations.scaling.parameters.min_p": 0.5,
"augmentations.scaling.parameters.max_p": 0.8
}
Example output:
{
"augmentations": {
"scaling": {
"apply": True,
"parameters": {
"min_p": 0.5,
"max_p": 0.8
}
}
}
}
"""
nested_dict = {}
for key in flat_dict:
nested_inner = flat_key_to_dict(key, flat_dict[key])
nested_dict = dict(deep_merge_dicts(nested_dict, nested_inner))
return nested_dict
def nested_to_flat_dict(nested_dict):
out = {}
for key, val in nested_dict.items():
if isinstance(val, dict):
val = [val]
if isinstance(val, list):
for subdict in val:
deeper = nested_to_flat_dict(subdict).items()
out.update({key + '.' + key2: val2 for key2, val2 in deeper})
else:
out[key] = val
return out | 9,115 | 38.124464 | 158 | py |
csshar_tfa | csshar_tfa-main/utils/experiment_utils.py | import datetime
import json
import os
import random
import numpy as np
import torch
import yaml
def generate_experiment_id():
""" A function for generating unique experiment id based on the current time"""
return str(datetime.datetime.now()).replace(' ', '_').replace(':', '_').replace('.', '_')
def generate_weights_file(directory, name, experiment_id):
""" A function returning unique tensorboard filename for an experiment
"""
return os.path.join(directory, name + '_' + experiment_id + '.pth')
def get_device(device_name):
""" A function for setting available device """
return torch.device(device_name) if torch.cuda.is_available() else torch.device('cpu')
def read_json_to_dict(path):
with open(path) as json_file:
res_dict = json.load(json_file)
return res_dict
def load_yaml_to_dict(path):
with open(path, "r") as stream:
try:
return yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
exit(1)
def create_results_csv(csv_path, df):
if not os.path.exists(os.path.split(csv_path)[0]):
os.makedirs(os.path.split(csv_path)[0])
df.to_csv(csv_path, index=None)
def dict_to_json(dict_, json_path):
if not os.path.exists(os.path.split(json_path)[0]):
os.makedirs(os.path.split(json_path)[0])
with open(json_path, 'w') as fp:
json.dump(dict_, fp)
def seed_all(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
| 1,704 | 25.640625 | 93 | py |
deep-image-retrieval | deep-image-retrieval-master/dirtorch/test_dir.py | import sys
import os
import os.path as osp
import pdb
import json
import tqdm
import numpy as np
import torch
import torch.nn.functional as F
from dirtorch.utils.convenient import mkdir
from dirtorch.utils import common
from dirtorch.utils.common import tonumpy, matmul, pool
from dirtorch.utils.pytorch_loader import get_loader
import dirtorch.nets as nets
import dirtorch.datasets as datasets
import dirtorch.datasets.downloader as dl
import pickle as pkl
import hashlib
def expand_descriptors(descs, db=None, alpha=0, k=0):
assert k >= 0 and alpha >= 0, 'k and alpha must be non-negative'
if k == 0:
return descs
descs = tonumpy(descs)
n = descs.shape[0]
db_descs = tonumpy(db if db is not None else descs)
sim = matmul(descs, db_descs)
if db is None:
sim[np.diag_indices(n)] = 0
idx = np.argpartition(sim, int(-k), axis=1)[:, int(-k):]
descs_aug = np.zeros_like(descs)
for i in range(n):
new_q = np.vstack([db_descs[j, :] * sim[i, j]**alpha for j in idx[i]])
new_q = np.vstack([descs[i], new_q])
new_q = np.mean(new_q, axis=0)
descs_aug[i] = new_q / np.linalg.norm(new_q)
return descs_aug
def extract_image_features(dataset, transforms, net, ret_imgs=False, same_size=False, flip=None,
desc="Extract feats...", iscuda=True, threads=8, batch_size=8):
""" Extract image features for a given dataset.
Output is 2-dimensional (B, D)
"""
if not same_size:
batch_size = 1
old_benchmark = torch.backends.cudnn.benchmark
torch.backends.cudnn.benchmark = False
loader = get_loader(dataset, trf_chain=transforms, preprocess=net.preprocess, iscuda=iscuda,
output=['img'], batch_size=batch_size, threads=threads, shuffle=False)
if hasattr(net, 'eval'):
net.eval()
tocpu = (lambda x: x.cpu()) if ret_imgs == 'cpu' else (lambda x: x)
img_feats = []
trf_images = []
with torch.no_grad():
for inputs in tqdm.tqdm(loader, desc, total=1+(len(dataset)-1)//batch_size):
imgs = inputs[0]
for i in range(len(imgs)):
if flip and flip.pop(0):
imgs[i] = imgs[i].flip(2)
imgs = common.variables(inputs[:1], net.iscuda)[0]
desc = net(imgs)
if ret_imgs:
trf_images.append(tocpu(imgs.detach()))
del imgs
del inputs
if len(desc.shape) == 1:
desc.unsqueeze_(0)
img_feats.append(desc.detach())
img_feats = torch.cat(img_feats, dim=0)
if len(img_feats.shape) == 1:
img_feats.unsqueeze_(0)
if not same_size:
torch.backends.cudnn.benchmark = old_benchmark
if ret_imgs:
if same_size:
trf_images = torch.cat(trf_images, dim=0)
return trf_images, img_feats
return img_feats
def eval_model(db, net, trfs, pooling='mean', gemp=3, detailed=False, whiten=None,
aqe=None, adba=None, threads=8, batch_size=16, save_feats=None,
load_feats=None, dbg=()):
""" Evaluate a trained model (network) on a given dataset.
The dataset is supposed to contain the evaluation code.
"""
print("\n>> Evaluation...")
query_db = db.get_query_db()
# extract DB feats
bdescs = []
qdescs = []
if not load_feats:
trfs_list = [trfs] if isinstance(trfs, str) else trfs
for trfs in trfs_list:
kw = dict(iscuda=net.iscuda, threads=threads, batch_size=batch_size, same_size='Pad' in trfs or 'Crop' in trfs)
bdescs.append(extract_image_features(db, trfs, net, desc="DB", **kw))
# extract query feats
qdescs.append(bdescs[-1] if db is query_db else extract_image_features(query_db, trfs, net, desc="query", **kw))
# pool from multiple transforms (scales)
bdescs = F.normalize(pool(bdescs, pooling, gemp), p=2, dim=1)
qdescs = F.normalize(pool(qdescs, pooling, gemp), p=2, dim=1)
else:
bdescs = np.load(os.path.join(load_feats, 'feats.bdescs.npy'))
if query_db is not db:
qdescs = np.load(os.path.join(load_feats, 'feats.qdescs.npy'))
else:
qdescs = bdescs
if save_feats:
mkdir(save_feats)
np.save(os.path.join(save_feats, 'feats.bdescs.npy'), bdescs.cpu().numpy())
if query_db is not db:
np.save(os.path.join(save_feats, 'feats.qdescs.npy'), qdescs.cpu().numpy())
if whiten is not None:
bdescs = common.whiten_features(tonumpy(bdescs), net.pca, **whiten)
qdescs = common.whiten_features(tonumpy(qdescs), net.pca, **whiten)
if adba is not None:
bdescs = expand_descriptors(bdescs, **args.adba)
if aqe is not None:
qdescs = expand_descriptors(qdescs, db=bdescs, **args.aqe)
scores = matmul(qdescs, bdescs)
del bdescs
del qdescs
res = {}
try:
aps = [db.eval_query_AP(q, s) for q, s in enumerate(tqdm.tqdm(scores, desc='AP'))]
if not isinstance(aps[0], dict):
aps = [float(e) for e in aps]
if detailed:
res['APs'] = aps
# Queries with no relevants have an AP of -1
res['mAP'] = float(np.mean([e for e in aps if e >= 0]))
else:
modes = aps[0].keys()
for mode in modes:
apst = [float(e[mode]) for e in aps]
if detailed:
res['APs'+'-'+mode] = apst
# Queries with no relevants have an AP of -1
res['mAP'+'-'+mode] = float(np.mean([e for e in apst if e >= 0]))
except NotImplementedError:
print(" AP not implemented!")
try:
tops = [db.eval_query_top(q, s) for q, s in enumerate(tqdm.tqdm(scores, desc='top1'))]
if detailed:
res['tops'] = tops
for k in tops[0]:
res['top%d' % k] = float(np.mean([top[k] for top in tops]))
except NotImplementedError:
pass
return res
def load_model(path, iscuda):
checkpoint = common.load_checkpoint(path, iscuda)
net = nets.create_model(pretrained="", **checkpoint['model_options'])
net = common.switch_model_to_cuda(net, iscuda, checkpoint)
net.load_state_dict(checkpoint['state_dict'])
net.preprocess = checkpoint.get('preprocess', net.preprocess)
if 'pca' in checkpoint:
net.pca = checkpoint.get('pca')
return net
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Evaluate a model')
parser.add_argument('--dataset', '-d', type=str, required=True, help='Command to load dataset')
parser.add_argument('--checkpoint', type=str, required=True, help='path to weights')
parser.add_argument('--trfs', type=str, required=False, default='', nargs='+', help='test transforms (can be several)')
parser.add_argument('--pooling', type=str, default="gem", help='pooling scheme if several trf chains')
parser.add_argument('--gemp', type=int, default=3, help='GeM pooling power')
parser.add_argument('--out-json', type=str, default="", help='path to output json')
parser.add_argument('--detailed', action='store_true', help='return detailed evaluation')
parser.add_argument('--save-feats', type=str, default="", help='path to output features')
parser.add_argument('--load-feats', type=str, default="", help='path to load features from')
parser.add_argument('--threads', type=int, default=8, help='number of thread workers')
parser.add_argument('--gpu', type=int, default=0, nargs='+', help='GPU ids')
parser.add_argument('--dbg', default=(), nargs='*', help='debugging options')
# post-processing
parser.add_argument('--whiten', type=str, default='Landmarks_clean', help='applies whitening')
parser.add_argument('--aqe', type=int, nargs='+', help='alpha-query expansion paramenters')
parser.add_argument('--adba', type=int, nargs='+', help='alpha-database augmentation paramenters')
parser.add_argument('--whitenp', type=float, default=0.25, help='whitening power, default is 0.5 (i.e., the sqrt)')
parser.add_argument('--whitenv', type=int, default=None, help='number of components, default is None (i.e. all components)')
parser.add_argument('--whitenm', type=float, default=1.0, help='whitening multiplier, default is 1.0 (i.e. no multiplication)')
args = parser.parse_args()
args.iscuda = common.torch_set_gpu(args.gpu)
if args.aqe is not None:
args.aqe = {'k': args.aqe[0], 'alpha': args.aqe[1]}
if args.adba is not None:
args.adba = {'k': args.adba[0], 'alpha': args.adba[1]}
dl.download_dataset(args.dataset)
dataset = datasets.create(args.dataset)
print("Test dataset:", dataset)
net = load_model(args.checkpoint, args.iscuda)
if args.whiten:
net.pca = net.pca[args.whiten]
args.whiten = {'whitenp': args.whitenp, 'whitenv': args.whitenv, 'whitenm': args.whitenm}
else:
net.pca = None
args.whiten = None
# Evaluate
res = eval_model(dataset, net, args.trfs, pooling=args.pooling, gemp=args.gemp, detailed=args.detailed,
threads=args.threads, dbg=args.dbg, whiten=args.whiten, aqe=args.aqe, adba=args.adba,
save_feats=args.save_feats, load_feats=args.load_feats)
print(' * ' + '\n * '.join(['%s = %g' % p for p in res.items()]))
if args.out_json:
# write to file
try:
data = json.load(open(args.out_json))
except IOError:
data = {}
data[args.dataset] = res
mkdir(args.out_json)
open(args.out_json, 'w').write(json.dumps(data, indent=1))
print("saved to "+args.out_json)
| 9,805 | 36.715385 | 131 | py |
deep-image-retrieval | deep-image-retrieval-master/dirtorch/extract_features.py | import sys
import os
import os.path as osp
import pdb
import json
import tqdm
import numpy as np
import torch
import torch.nn.functional as F
from dirtorch.utils.convenient import mkdir
from dirtorch.utils import common
from dirtorch.utils.common import tonumpy, matmul, pool
from dirtorch.utils.pytorch_loader import get_loader
import dirtorch.test_dir as test
import dirtorch.nets as nets
import dirtorch.datasets as datasets
import dirtorch.datasets.downloader as dl
import pickle as pkl
import hashlib
def extract_features(db, net, trfs, pooling='mean', gemp=3, detailed=False, whiten=None,
threads=8, batch_size=16, output=None, dbg=()):
""" Extract features from trained model (network) on a given dataset.
"""
print("\n>> Extracting features...")
try:
query_db = db.get_query_db()
except NotImplementedError:
query_db = None
# extract DB feats
bdescs = []
qdescs = []
trfs_list = [trfs] if isinstance(trfs, str) else trfs
for trfs in trfs_list:
kw = dict(iscuda=net.iscuda, threads=threads, batch_size=batch_size, same_size='Pad' in trfs or 'Crop' in trfs)
bdescs.append(test.extract_image_features(db, trfs, net, desc="DB", **kw))
# extract query feats
if query_db is not None:
qdescs.append(bdescs[-1] if db is query_db
else test.extract_image_features(query_db, trfs, net, desc="query", **kw))
# pool from multiple transforms (scales)
bdescs = tonumpy(F.normalize(pool(bdescs, pooling, gemp), p=2, dim=1))
if query_db is not None:
qdescs = tonumpy(F.normalize(pool(qdescs, pooling, gemp), p=2, dim=1))
if whiten is not None:
bdescs = common.whiten_features(bdescs, net.pca, **whiten)
if query_db is not None:
qdescs = common.whiten_features(qdescs, net.pca, **whiten)
mkdir(output, isfile=True)
if query_db is db or query_db is None:
np.save(output, bdescs)
else:
o = osp.splitext(output)
np.save(o[0]+'.qdescs'+o[1], qdescs)
np.save(o[0]+'.dbdescs'+o[1], bdescs)
print('Features extracted.')
def load_model(path, iscuda):
checkpoint = common.load_checkpoint(path, iscuda)
net = nets.create_model(pretrained="", **checkpoint['model_options'])
net = common.switch_model_to_cuda(net, iscuda, checkpoint)
net.load_state_dict(checkpoint['state_dict'])
net.preprocess = checkpoint.get('preprocess', net.preprocess)
if 'pca' in checkpoint:
net.pca = checkpoint.get('pca')
return net
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Evaluate a model')
parser.add_argument('--dataset', '-d', type=str, required=True, help='Command to load dataset')
parser.add_argument('--checkpoint', type=str, required=True, help='path to weights')
parser.add_argument('--trfs', type=str, required=False, default='', nargs='+', help='test transforms (can be several)')
parser.add_argument('--pooling', type=str, default="gem", help='pooling scheme if several trf chains')
parser.add_argument('--gemp', type=int, default=3, help='GeM pooling power')
parser.add_argument('--out-json', type=str, default="", help='path to output json')
parser.add_argument('--detailed', action='store_true', help='return detailed evaluation')
parser.add_argument('--output', type=str, default="", help='path to output features')
parser.add_argument('--threads', type=int, default=8, help='number of thread workers')
parser.add_argument('--gpu', type=int, nargs='+', help='GPU ids')
parser.add_argument('--dbg', default=(), nargs='*', help='debugging options')
# post-processing
parser.add_argument('--whiten', type=str, default=None, help='applies whitening')
parser.add_argument('--whitenp', type=float, default=0.5, help='whitening power, default is 0.5 (i.e., the sqrt)')
parser.add_argument('--whitenv', type=int, default=None, help='number of components, default is None (i.e. all components)')
parser.add_argument('--whitenm', type=float, default=1.0, help='whitening multiplier, default is 1.0 (i.e. no multiplication)')
args = parser.parse_args()
args.iscuda = common.torch_set_gpu(args.gpu)
dataset = datasets.create(args.dataset)
print("Dataset:", dataset)
net = load_model(args.checkpoint, args.iscuda)
if args.whiten:
net.pca = net.pca[args.whiten]
args.whiten = {'whitenp': args.whitenp, 'whitenv': args.whitenv, 'whitenm': args.whitenm}
else:
net.pca = None
args.whiten = None
# Evaluate
res = extract_features(dataset, net, args.trfs, pooling=args.pooling, gemp=args.gemp, detailed=args.detailed,
threads=args.threads, dbg=args.dbg, whiten=args.whiten, output=args.output)
| 4,874 | 37.385827 | 131 | py |
deep-image-retrieval | deep-image-retrieval-master/dirtorch/loss.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class APLoss (nn.Module):
""" Differentiable AP loss, through quantization. From the paper:
Learning with Average Precision: Training Image Retrieval with a Listwise Loss
Jerome Revaud, Jon Almazan, Rafael Sampaio de Rezende, Cesar de Souza
https://arxiv.org/abs/1906.07589
Input: (N, M) values in [min, max]
label: (N, M) values in {0, 1}
Returns: 1 - mAP (mean AP for each n in {1..N})
Note: typically, this is what you wanna minimize
"""
def __init__(self, nq=25, min=0, max=1):
nn.Module.__init__(self)
assert isinstance(nq, int) and 2 <= nq <= 100
self.nq = nq
self.min = min
self.max = max
gap = max - min
assert gap > 0
# Initialize quantizer as non-trainable convolution
self.quantizer = q = nn.Conv1d(1, 2*nq, kernel_size=1, bias=True)
q.weight = nn.Parameter(q.weight.detach(), requires_grad=False)
q.bias = nn.Parameter(q.bias.detach(), requires_grad=False)
a = (nq-1) / gap
# First half equal to lines passing to (min+x,1) and (min+x+1/a,0) with x = {nq-1..0}*gap/(nq-1)
q.weight[:nq] = -a
q.bias[:nq] = torch.from_numpy(a*min + np.arange(nq, 0, -1)) # b = 1 + a*(min+x)
# First half equal to lines passing to (min+x,1) and (min+x-1/a,0) with x = {nq-1..0}*gap/(nq-1)
q.weight[nq:] = a
q.bias[nq:] = torch.from_numpy(np.arange(2-nq, 2, 1) - a*min) # b = 1 - a*(min+x)
# First and last one as a horizontal straight line
q.weight[0] = q.weight[-1] = 0
q.bias[0] = q.bias[-1] = 1
def forward(self, x, label, qw=None, ret='1-mAP'):
assert x.shape == label.shape # N x M
N, M = x.shape
# Quantize all predictions
q = self.quantizer(x.unsqueeze(1))
q = torch.min(q[:, :self.nq], q[:, self.nq:]).clamp(min=0) # N x Q x M
nbs = q.sum(dim=-1) # number of samples N x Q = c
rec = (q * label.view(N, 1, M).float()).sum(dim=-1) # number of correct samples = c+ N x Q
prec = rec.cumsum(dim=-1) / (1e-16 + nbs.cumsum(dim=-1)) # precision
rec /= rec.sum(dim=-1).unsqueeze(1) # norm in [0,1]
ap = (prec * rec).sum(dim=-1) # per-image AP
if ret == '1-mAP':
if qw is not None:
ap *= qw # query weights
return 1 - ap.mean()
elif ret == 'AP':
assert qw is None
return ap
else:
raise ValueError("Bad return type for APLoss(): %s" % str(ret))
def measures(self, x, gt, loss=None):
if loss is None:
loss = self.forward(x, gt)
return {'loss_ap': float(loss)}
class TAPLoss (APLoss):
""" Differentiable tie-aware AP loss, through quantization. From the paper:
Learning with Average Precision: Training Image Retrieval with a Listwise Loss
Jerome Revaud, Jon Almazan, Rafael Sampaio de Rezende, Cesar de Souza
https://arxiv.org/abs/1906.07589
Input: (N, M) values in [min, max]
label: (N, M) values in {0, 1}
Returns: 1 - mAP (mean AP for each n in {1..N})
Note: typically, this is what you wanna minimize
"""
def __init__(self, nq=25, min=0, max=1, simplified=False):
APLoss.__init__(self, nq=nq, min=min, max=max)
self.simplified = simplified
def forward(self, x, label, qw=None, ret='1-mAP'):
'''N: number of images;
M: size of the descs;
Q: number of bins (nq);
'''
assert x.shape == label.shape # N x M
N, M = x.shape
label = label.float()
Np = label.sum(dim=-1, keepdim=True)
# Quantize all predictions
q = self.quantizer(x.unsqueeze(1))
q = torch.min(q[:, :self.nq], q[:, self.nq:]).clamp(min=0) # N x Q x M
c = q.sum(dim=-1) # number of samples N x Q = nbs on APLoss
cp = (q * label.view(N, 1, M)).sum(dim=-1) # N x Q number of correct samples = rec on APLoss
C = c.cumsum(dim=-1)
Cp = cp.cumsum(dim=-1)
zeros = torch.zeros(N, 1).to(x.device)
C_1d = torch.cat((zeros, C[:, :-1]), dim=-1)
Cp_1d = torch.cat((zeros, Cp[:, :-1]), dim=-1)
if self.simplified:
aps = cp * (Cp_1d+Cp+1) / (C_1d+C+1) / Np
else:
eps = 1e-8
ratio = (cp - 1).clamp(min=0) / ((c-1).clamp(min=0) + eps)
aps = cp * (c * ratio + (Cp_1d + 1 - ratio * (C_1d + 1)) * torch.log((C + 1) / (C_1d + 1))) / (c + eps) / Np
aps = aps.sum(dim=-1)
assert aps.numel() == N
if ret == '1-mAP':
if qw is not None:
aps *= qw # query weights
return 1 - aps.mean()
elif ret == 'AP':
assert qw is None
return aps
else:
raise ValueError("Bad return type for APLoss(): %s" % str(ret))
def measures(self, x, gt, loss=None):
if loss is None:
loss = self.forward(x, gt)
return {'loss_tap'+('s' if self.simplified else ''): float(loss)}
class TripletMarginLoss(nn.TripletMarginLoss):
""" PyTorch's margin triplet loss
TripletMarginLoss(margin=1.0, p=2, eps=1e-06, swap=False, size_average=True, reduce=True)
"""
def eval_func(self, dp, dn):
return max(0, dp - dn + self.margin)
class TripletLogExpLoss(nn.Module):
r"""Creates a criterion that measures the triplet loss given an input
tensors x1, x2, x3.
This is used for measuring a relative similarity between samples. A triplet
is composed by `a`, `p` and `n`: anchor, positive examples and negative
example respectively. The shape of all input variables should be
:math:`(N, D)`.
The distance is described in detail in the paper `Improving Pairwise Ranking for Multi-Label
Image Classification`_ by Y. Li et al.
.. math::
L(a, p, n) = log \left( 1 + exp(d(a_i, p_i) - d(a_i, n_i) \right)
Args:
anchor: anchor input tensor
positive: positive input tensor
negative: negative input tensor
Shape:
- Input: :math:`(N, D)` where `D = vector dimension`
- Output: :math:`(N, 1)`
>>> triplet_loss = nn.TripletLogExpLoss(p=2)
>>> input1 = autograd.Variable(torch.randn(100, 128))
>>> input2 = autograd.Variable(torch.randn(100, 128))
>>> input3 = autograd.Variable(torch.randn(100, 128))
>>> output = triplet_loss(input1, input2, input3)
>>> output.backward()
.. _Learning shallow convolutional feature descriptors with triplet losses:
http://www.iis.ee.ic.ac.uk/%7Evbalnt/shallow_descr/TFeat_paper.pdf
"""
def __init__(self, p=2, eps=1e-6, swap=False):
super(TripletLogExpLoss, self).__init__()
self.p = p
self.eps = eps
self.swap = swap
def forward(self, anchor, positive, negative):
assert anchor.size() == positive.size(), "Input sizes between positive and negative must be equal."
assert anchor.size() == negative.size(), "Input sizes between anchor and negative must be equal."
assert positive.size() == negative.size(), "Input sizes between positive and negative must be equal."
assert anchor.dim() == 2, "Input must be a 2D matrix."
d_p = F.pairwise_distance(anchor, positive, self.p, self.eps)
d_n = F.pairwise_distance(anchor, negative, self.p, self.eps)
if self.swap:
d_s = F.pairwise_distance(positive, negative, self.p, self.eps)
d_n = torch.min(d_n, d_s)
dist = torch.log(1 + torch.exp(d_p - d_n))
loss = torch.mean(dist)
return loss
def eval_func(self, dp, dn):
return np.log(1 + np.exp(dp - dn))
def sim_to_dist(scores):
return 1 - torch.sqrt(2.001 - 2*scores)
class APLoss_dist (APLoss):
def forward(self, x, label, **kw):
d = sim_to_dist(x)
return APLoss.forward(self, d, label, **kw)
class TAPLoss_dist (TAPLoss):
def forward(self, x, label, **kw):
d = sim_to_dist(x)
return TAPLoss.forward(self, d, label, **kw)
| 8,245 | 35.8125 | 120 | py |
deep-image-retrieval | deep-image-retrieval-master/dirtorch/extract_kapture.py | import os
import tqdm
import torch.nn.functional as F
from typing import Optional
os.environ['DB_ROOT'] = ''
from dirtorch.utils import common # noqa: E402
from dirtorch.utils.common import tonumpy, pool # noqa: E402
from dirtorch.datasets.generic import ImageList # noqa: E402
from dirtorch.test_dir import extract_image_features # noqa: E402
from dirtorch.extract_features import load_model # noqa: E402
import kapture # noqa: E402
from kapture.io.csv import kapture_from_dir, get_all_tar_handlers # noqa: E402
from kapture.io.csv import get_feature_csv_fullpath, global_features_to_file # noqa: E402
from kapture.io.records import get_image_fullpath # noqa: E402
from kapture.io.features import get_global_features_fullpath, image_global_features_to_file # noqa: E402
from kapture.io.features import global_features_check_dir # noqa: E402
def extract_kapture_global_features(kapture_root_path: str, net, global_features_type: str,
trfs, pooling='mean', gemp=3, whiten=None,
threads=8, batch_size=16):
""" Extract features from trained model (network) on a given dataset.
"""
print(f'loading {kapture_root_path}')
with get_all_tar_handlers(kapture_root_path,
mode={kapture.Keypoints: 'r',
kapture.Descriptors: 'r',
kapture.GlobalFeatures: 'a',
kapture.Matches: 'r'}) as tar_handlers:
kdata = kapture_from_dir(kapture_root_path, None,
skip_list=[kapture.Keypoints,
kapture.Descriptors,
kapture.Matches,
kapture.Points3d,
kapture.Observations],
tar_handlers=tar_handlers)
root = get_image_fullpath(kapture_root_path, image_filename=None)
assert kdata.records_camera is not None
imgs = [image_name for _, _, image_name in kapture.flatten(kdata.records_camera)]
if kdata.global_features is None:
kdata.global_features = {}
if global_features_type in kdata.global_features:
imgs = [image_name
for image_name in imgs
if image_name not in kdata.global_features[global_features_type]]
if len(imgs) == 0:
print('All global features are already extracted')
return
dataset = ImageList(img_list_path=None, root=root, imgs=imgs)
print(f'\nEvaluation on {dataset}')
# extract DB feats
bdescs = []
trfs_list = [trfs] if isinstance(trfs, str) else trfs
for trfs in trfs_list:
kw = dict(iscuda=net.iscuda, threads=threads, batch_size=batch_size,
same_size='Pad' in trfs or 'Crop' in trfs)
bdescs.append(extract_image_features(dataset, trfs, net, desc="DB", **kw))
# pool from multiple transforms (scales)
bdescs = tonumpy(F.normalize(pool(bdescs, pooling, gemp), p=2, dim=1))
if whiten is not None:
bdescs = common.whiten_features(bdescs, net.pca, **whiten)
print('writing extracted global features')
os.umask(0o002)
gfeat_dtype = bdescs.dtype
gfeat_dsize = bdescs.shape[1]
if global_features_type not in kdata.global_features:
kdata.global_features[global_features_type] = kapture.GlobalFeatures('dirtorch', gfeat_dtype,
gfeat_dsize, 'L2')
global_features_config_absolute_path = get_feature_csv_fullpath(kapture.GlobalFeatures,
global_features_type,
kapture_root_path)
global_features_to_file(global_features_config_absolute_path, kdata.global_features[global_features_type])
else:
assert kdata.global_features[global_features_type].dtype == gfeat_dtype
assert kdata.global_features[global_features_type].dsize == gfeat_dsize
assert kdata.global_features[global_features_type].metric_type == 'L2'
for i in tqdm.tqdm(range(dataset.nimg)):
image_name = dataset.get_key(i)
global_feature_fullpath = get_global_features_fullpath(global_features_type, kapture_root_path, image_name,
tar_handlers)
gfeat_i = bdescs[i, :]
assert gfeat_i.shape == (gfeat_dsize,)
image_global_features_to_file(global_feature_fullpath, gfeat_i)
kdata.global_features[global_features_type].add(image_name)
del gfeat_i
del bdescs
if not global_features_check_dir(kdata.global_features[global_features_type], global_features_type,
kapture_root_path, tar_handlers):
print('global feature extraction ended successfully but not all files were saved')
else:
print('Features extracted.')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Evaluate a model')
parser.add_argument('--kapture-root', type=str, required=True, help='path to kapture root directory')
parser.add_argument('--checkpoint', type=str, required=True, help='path to weights')
parser.add_argument('--global-features-type', default=None,
help='global features type_name, default is basename of checkpoint')
parser.add_argument('--trfs', type=str, required=False, default='',
nargs='+', help='test transforms (can be several)')
parser.add_argument('--pooling', type=str, default="gem", help='pooling scheme if several trf chains')
parser.add_argument('--gemp', type=int, default=3, help='GeM pooling power')
parser.add_argument('--threads', type=int, default=8, help='number of thread workers')
parser.add_argument('--gpu', type=int, nargs='+', help='GPU ids')
# post-processing
parser.add_argument('--whiten', type=str, default=None, help='applies whitening')
parser.add_argument('--whitenp', type=float, default=0.5, help='whitening power, default is 0.5 (i.e., the sqrt)')
parser.add_argument('--whitenv', type=int, default=None,
help='number of components, default is None (i.e. all components)')
parser.add_argument('--whitenm', type=float, default=1.0,
help='whitening multiplier, default is 1.0 (i.e. no multiplication)')
args = parser.parse_args()
args.iscuda = common.torch_set_gpu(args.gpu)
if args.global_features_type is None:
args.global_features_type = os.path.splitext(os.path.basename(args.checkpoint))[0]
print(f'global_features_type set to {args.global_features_type}')
net = load_model(args.checkpoint, args.iscuda)
if args.whiten:
net.pca = net.pca[args.whiten]
args.whiten = {'whitenp': args.whitenp, 'whitenv': args.whitenv, 'whitenm': args.whitenm}
else:
net.pca = None
args.whiten = None
# Evaluate
res = extract_kapture_global_features(args.kapture_root, net, args.global_features_type,
args.trfs, pooling=args.pooling, gemp=args.gemp,
threads=args.threads, whiten=args.whiten)
| 7,658 | 49.388158 | 119 | py |
deep-image-retrieval | deep-image-retrieval-master/dirtorch/nets/rmac_resnet_fpn.py | import pdb
from .backbones.resnet import *
from .layers.pooling import GeneralizedMeanPooling, GeneralizedMeanPoolingP
def l2_normalize(x, axis=-1):
x = F.normalize(x, p=2, dim=axis)
return x
class ResNet_RMAC_FPN(ResNet):
""" ResNet for RMAC (without ROI pooling)
"""
def __init__(self, block, layers, model_name, out_dim=None, norm_features=False,
pooling='gem', gemp=3, center_bias=0, mode=1,
dropout_p=None, without_fc=False, **kwargs):
ResNet.__init__(self, block, layers, 0, model_name, **kwargs)
self.norm_features = norm_features
self.without_fc = without_fc
self.pooling = pooling
self.center_bias = center_bias
self.mode = mode
dim1 = 256 * block.expansion
dim2 = 512 * block.expansion
if out_dim is None: out_dim = dim1 + dim2
#FPN
if self.mode == 1:
self.conv1x5 = nn.Conv2d(dim2, dim1, kernel_size=1, stride=1, bias=False)
self.conv3c4 = nn.Conv2d(dim1, dim1, kernel_size=3, stride=1, padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if pooling == 'max':
self.adpool = nn.AdaptiveMaxPool2d(output_size=1)
elif pooling == 'avg':
self.adpool = nn.AdaptiveAvgPool2d(output_size=1)
elif pooling == 'gem':
self.adpoolx5 = GeneralizedMeanPoolingP(norm=gemp)
self.adpoolc4 = GeneralizedMeanPoolingP(norm=gemp)
self.dropout = nn.Dropout(dropout_p) if dropout_p is not None else None
self.fc = nn.Linear(768 * block.expansion, out_dim)
self.fc_name = 'fc'
self.feat_dim = out_dim
self.detach = False
def forward(self, x):
x4, x5 = ResNet.forward(self, x, -1)
# FPN
if self.mode == 1:
c5 = F.interpolate(x5, size=x4.shape[-2:], mode='nearest')
c5 = self.conv1x5(c5)
c5 = self.relu(c5)
x4 = x4 + c5
x4 = self.conv3c4(x4)
x4 = self.relu(x4)
if self.dropout is not None:
x5 = self.dropout(x5)
x4 = self.dropout(x4)
if self.detach:
# stop the back-propagation here, if needed
x5 = Variable(x5.detach())
x5 = self.id(x5) # fake transformation
x4 = Variable(x4.detach())
x4 = self.id(x4) # fake transformation
# global pooling
x5 = self.adpoolx5(x5)
x4 = self.adpoolc4(x4)
x = torch.cat((x4, x5), 1)
if self.norm_features:
x = l2_normalize(x, axis=1)
x.squeeze_()
if not self.without_fc:
x = self.fc(x)
x = l2_normalize(x, axis=-1)
return x
def resnet18_fpn_rmac(backbone=ResNet_RMAC_FPN, **kwargs):
kwargs.pop('scales', None)
return backbone(BasicBlock, [2, 2, 2, 2], 'resnet18', **kwargs)
def resnet50_fpn_rmac(backbone=ResNet_RMAC_FPN, **kwargs):
kwargs.pop('scales', None)
return backbone(Bottleneck, [3, 4, 6, 3], 'resnet50', **kwargs)
def resnet101_fpn_rmac(backbone=ResNet_RMAC_FPN, **kwargs):
kwargs.pop('scales', None)
return backbone(Bottleneck, [3, 4, 23, 3], 'resnet101', **kwargs)
def resnet101_fpn0_rmac(backbone=ResNet_RMAC_FPN, **kwargs):
kwargs.pop('scales', None)
return backbone(Bottleneck, [3, 4, 23, 3], 'resnet101', mode=0, **kwargs)
def resnet152_fpn_rmac(backbone=ResNet_RMAC_FPN, **kwargs):
kwargs.pop('scales', None)
return backbone(Bottleneck, [3, 8, 36, 3], 'resnet152', **kwargs)
| 3,816 | 25.692308 | 96 | py |
deep-image-retrieval | deep-image-retrieval-master/dirtorch/nets/rmac_resnext.py | from .backbones.resnext101_features import *
from .layers.pooling import GeneralizedMeanPooling, GeneralizedMeanPoolingP
def l2_normalize(x, axis=-1):
x = F.normalize(x, p=2, dim=axis)
return x
class ResNext_RMAC(nn.Module):
""" ResNet for RMAC (without ROI pooling)
"""
def __init__(self, backbone, out_dim=2048, norm_features=False,
pooling='gem', gemp=3, center_bias=0,
dropout_p=None, without_fc=False, **kwargs):
super(ResNeXt_RMAC, self).__init__()
self.backbone = backbone
self.norm_features = norm_features
self.without_fc = without_fc
self.pooling = pooling
self.center_bias = center_bias
if pooling == 'max':
self.adpool = nn.AdaptiveMaxPool2d(output_size=1)
elif pooling == 'avg':
self.adpool = nn.AdaptiveAvgPool2d(output_size=1)
elif pooling == 'gem':
self.adpool = GeneralizedMeanPoolingP(norm=gemp)
self.dropout = nn.Dropout(dropout_p) if dropout_p is not None else None
self.last_linear = nn.Linear(2048, out_dim)
self.fc_name = 'last_linear'
self.feat_dim = out_dim
self.detach = False
def forward(self, x):
x = ResNet.forward(self, x)
if self.dropout is not None:
x = self.dropout(x)
if self.detach:
# stop the back-propagation here, if needed
x = Variable(x.detach())
x = self.id(x) # fake transformation
if self.center_bias > 0:
b = self.center_bias
bias = 1 + torch.FloatTensor([[[[0,0,0,0],[0,b,b,0],[0,b,b,0],[0,0,0,0]]]]).to(x.device)
bias = torch.nn.functional.interpolate(bias, size=x.shape[-2:], mode='bilinear', align_corners=True)
x = x*bias
# global pooling
x = self.adpool(x)
if self.norm_features:
x = l2_normalize(x, axis=1)
x.squeeze_()
if not self.without_fc:
x = self.fc(x)
x = l2_normalize(x, axis=-1)
return x
def resnet18_rmac(backbone=ResNet_RMAC, **kwargs):
kwargs.pop('scales', None)
return backbone(BasicBlock, [2, 2, 2, 2], 'resnet18', **kwargs)
def resnet50_rmac(backbone=ResNet_RMAC, **kwargs):
kwargs.pop('scales', None)
return backbone(Bottleneck, [3, 4, 6, 3], 'resnet50', **kwargs)
def resnet101_rmac(backbone=ResNet_RMAC, **kwargs):
kwargs.pop('scales', None)
return backbone(Bottleneck, [3, 4, 23, 3], 'resnet101', **kwargs)
def resnet152_rmac(backbone=ResNet_RMAC, **kwargs):
kwargs.pop('scales', None)
return backbone(Bottleneck, [3, 8, 36, 3], 'resnet152', **kwargs)
| 2,731 | 23.176991 | 112 | py |
deep-image-retrieval | deep-image-retrieval-master/dirtorch/nets/rmac_resnet.py | import pdb
import torch
from .backbones.resnet import *
from .layers.pooling import GeneralizedMeanPooling, GeneralizedMeanPoolingP
def l2_normalize(x, axis=-1):
x = F.normalize(x, p=2, dim=axis)
return x
class ResNet_RMAC(ResNet):
""" ResNet for RMAC (without ROI pooling)
"""
def __init__(self, block, layers, model_name, out_dim=2048, norm_features=False,
pooling='gem', gemp=3, center_bias=0,
dropout_p=None, without_fc=False, **kwargs):
ResNet.__init__(self, block, layers, 0, model_name, **kwargs)
self.norm_features = norm_features
self.without_fc = without_fc
self.pooling = pooling
self.center_bias = center_bias
if pooling == 'max':
self.adpool = nn.AdaptiveMaxPool2d(output_size=1)
elif pooling == 'avg':
self.adpool = nn.AdaptiveAvgPool2d(output_size=1)
elif pooling.startswith('gem'):
self.adpool = GeneralizedMeanPoolingP(norm=gemp)
else:
raise ValueError(pooling)
self.dropout = nn.Dropout(dropout_p) if dropout_p is not None else None
self.fc = nn.Linear(512 * block.expansion, out_dim)
self.fc_name = 'fc'
self.feat_dim = out_dim
self.detach = False
def forward(self, x):
bs, _, H, W = x.shape
x = ResNet.forward(self, x)
if self.dropout is not None:
x = self.dropout(x)
if self.detach:
# stop the back-propagation here, if needed
x = Variable(x.detach())
x = self.id(x) # fake transformation
if self.center_bias > 0:
b = self.center_bias
bias = 1 + torch.FloatTensor([[[[0,0,0,0],[0,b,b,0],[0,b,b,0],[0,0,0,0]]]]).to(x.device)
bias = torch.nn.functional.interpolate(bias, size=x.shape[-2:], mode='bilinear', align_corners=True)
x = x*bias
# global pooling
x = self.adpool(x)
if self.norm_features:
x = l2_normalize(x, axis=1)
x.squeeze_()
if not self.without_fc:
x = self.fc(x)
x = l2_normalize(x, axis=-1)
return x
def resnet18_rmac(backbone=ResNet_RMAC, **kwargs):
kwargs.pop('scales', None)
return backbone(BasicBlock, [2, 2, 2, 2], 'resnet18', **kwargs)
def resnet50_rmac(backbone=ResNet_RMAC, **kwargs):
kwargs.pop('scales', None)
return backbone(Bottleneck, [3, 4, 6, 3], 'resnet50', **kwargs)
def resnet101_rmac(backbone=ResNet_RMAC, **kwargs):
kwargs.pop('scales', None)
return backbone(Bottleneck, [3, 4, 23, 3], 'resnet101', **kwargs)
def resnet152_rmac(backbone=ResNet_RMAC, **kwargs):
kwargs.pop('scales', None)
return backbone(Bottleneck, [3, 8, 36, 3], 'resnet152', **kwargs)
| 2,838 | 23.059322 | 112 | py |
deep-image-retrieval | deep-image-retrieval-master/dirtorch/nets/__init__.py | ''' List all architectures at the bottom of this file.
To list all available architectures, use:
python -m nets
'''
import os
import pdb
import torch
from collections import OrderedDict
internal_funcs = set(globals().keys())
from .backbones.resnet import resnet101, resnet50, resnet18, resnet152
from .rmac_resnet import resnet18_rmac, resnet50_rmac, resnet101_rmac, resnet152_rmac
from .rmac_resnet_fpn import resnet18_fpn_rmac, resnet50_fpn_rmac, resnet101_fpn_rmac, resnet101_fpn0_rmac, resnet152_fpn_rmac
model_names = {name for name in globals()
if name.islower() and not name.startswith("__")
and name not in internal_funcs
and callable(globals()[name])}
def create_model(arch, pretrained='', delete_fc=False, *args, **kwargs):
''' Create an empty network for RMAC.
arch : str
name of the function to call
kargs : list
mandatory arguments
kwargs : dict
optional arguments
'''
# creating model
if arch not in model_names:
raise NameError("unknown model architecture '%s'\nSelect one in %s" % (
arch, ','.join(model_names)))
model = globals()[arch](*args, **kwargs)
model.preprocess = dict(
mean=model.rgb_means,
std=model.rgb_stds,
input_size=max(model.input_size)
)
if os.path.isfile(pretrained or ''):
class watcher:
class AverageMeter:
pass
class Watch:
pass
import sys
sys.modules['utils.watcher'] = watcher
weights = torch.load(pretrained, map_location=lambda storage, loc: storage)['state_dict']
load_pretrained_weights(model, weights, delete_fc=delete_fc)
elif pretrained:
assert hasattr(model, 'load_pretrained_weights'), 'Model %s must be initialized with a valid model file (not %s)' % (arch, pretrained)
model.load_pretrained_weights(pretrained)
return model
def load_pretrained_weights(net, state_dict, delete_fc=False):
""" Load the pretrained weights (chop the last FC layer if needed)
If layers are missing or of wrong shape, will not load them.
"""
new_dict = OrderedDict()
for k, v in list(state_dict.items()):
if k.startswith('module.'):
k = k.replace('module.', '')
new_dict[k] = v
# Add missing weights from the network itself
d = net.state_dict()
for k, v in list(d.items()):
if k not in new_dict:
if not k.endswith('num_batches_tracked'):
print("Loading weights for %s: Missing layer %s" % (type(net).__name__, k))
new_dict[k] = v
elif v.shape != new_dict[k].shape:
print("Loading weights for %s: Bad shape for layer %s, skipping" % (type(net).__name__, k))
new_dict[k] = v
net.load_state_dict(new_dict)
# Remove the FC layer if size doesn't match
if delete_fc:
fc = net.fc_name
del new_dict[fc+'.weight']
del new_dict[fc+'.bias']
| 3,084 | 23.484127 | 142 | py |
deep-image-retrieval | deep-image-retrieval-master/dirtorch/nets/layers/pooling.py | import pdb
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
from torch.nn.modules import Module
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import math
class GeneralizedMeanPooling(Module):
r"""Applies a 2D power-average adaptive pooling over an input signal composed of several input planes.
The function computed is: :math:`f(X) = pow(sum(pow(X, p)), 1/p)`
- At p = infinity, one gets Max Pooling
- At p = 1, one gets Average Pooling
The output is of size H x W, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size of the image of the form H x W.
Can be a tuple (H, W) or a single H for a square image H x H
H and W can be either a ``int``, or ``None`` which means the size will
be the same as that of the input.
"""
def __init__(self, norm, output_size=1, eps=1e-6):
super(GeneralizedMeanPooling, self).__init__()
assert norm > 0
self.p = float(norm)
self.output_size = output_size
self.eps = eps
def forward(self, x):
x = x.clamp(min=self.eps).pow(self.p)
return F.adaptive_avg_pool2d(x, self.output_size).pow(1. / self.p)
def __repr__(self):
return self.__class__.__name__ + '(' \
+ str(self.p) + ', ' \
+ 'output_size=' + str(self.output_size) + ')'
class GeneralizedMeanPoolingP(GeneralizedMeanPooling):
""" Same, but norm is trainable
"""
def __init__(self, norm=3, output_size=1, eps=1e-6):
super(GeneralizedMeanPoolingP, self).__init__(norm, output_size, eps)
self.p = Parameter(torch.ones(1) * norm)
| 1,815 | 30.859649 | 106 | py |
deep-image-retrieval | deep-image-retrieval-master/dirtorch/nets/backbones/resnet.py | import torch.nn as nn
import torch
import math
import numpy as np
from torch.autograd import Variable
import torch.nn.functional as F
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
''' Standard bottleneck block
input = inplanes * H * W
middle = planes * H/stride * W/stride
output = 4*planes * H/stride * W/stride
'''
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=dilation, dilation=dilation, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
def reset_weights(net):
for m in net.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
class ResNet(nn.Module):
""" A standard ResNet.
"""
def __init__(self, block, layers, fc_out, model_name, self_similarity_radius=None, self_similarity_version=2):
nn.Module.__init__(self)
self.model_name = model_name
# default values for a network pre-trained on imagenet
self.rgb_means = [0.485, 0.456, 0.406]
self.rgb_stds = [0.229, 0.224, 0.225]
self.input_size = (3, 224, 224)
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], self_similarity_radius=self_similarity_radius, self_similarity_version=self_similarity_version)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, self_similarity_radius=self_similarity_radius, self_similarity_version=self_similarity_version)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, self_similarity_radius=self_similarity_radius, self_similarity_version=self_similarity_version)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, self_similarity_radius=self_similarity_radius, self_similarity_version=self_similarity_version)
reset_weights(self)
self.fc = None
self.fc_out = fc_out
if self.fc_out > 0:
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(512 * block.expansion, fc_out)
self.fc_name = 'fc'
def _make_layer(self, block, planes, blocks, stride=1, self_similarity_radius=None, self_similarity_version=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes=planes, stride=stride, downsample=downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
if self_similarity_radius:
if self_similarity_version == 1:
from . self_sim import SelfSimilarity1
layers.append(SelfSimilarity1(self_similarity_radius, self.inplanes))
else:
from . self_sim import SelfSimilarity2
layers.append(SelfSimilarity2(self_similarity_radius, self.inplanes))
return nn.Sequential(*layers)
def forward(self, x, out_layer=0):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
if out_layer==-1:
return x, self.layer4(x)
x = self.layer4(x)
if self.fc_out > 0:
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def load_pretrained_weights(self, pretrain_code):
if pretrain_code == 'imagenet':
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
else:
raise NameError("unknown pretraining code '%s'" % pretrain_code)
print("Loading ImageNet pretrained weights for %s" % pretrain_code)
assert self.model_name in model_urls, "Unknown model '%s'" % self.model_name
model_dir='dirtorch/data/models/classification/'
import os, stat # give group permission
try: os.makedirs(model_dir)
except OSError: pass
import torch.utils.model_zoo as model_zoo
state_dict = model_zoo.load_url(model_urls[self.model_name], model_dir=model_dir)
from . import load_pretrained_weights
load_pretrained_weights(self, state_dict)
def resnet18(out_dim=2048):
"""Constructs a ResNet-18 model.
"""
net = ResNet(BasicBlock, [2, 2, 2, 2], out_dim, 'resnet18')
return net
def resnet50(out_dim=2048):
"""Constructs a ResNet-50 model.
"""
net = ResNet(Bottleneck, [3, 4, 6, 3], out_dim, 'resnet50')
return net
def resnet101(out_dim=2048):
"""Constructs a ResNet-101 model.
"""
net = ResNet(Bottleneck, [3, 4, 23, 3], out_dim, 'resnet101')
return net
def resnet152(out_dim=2048):
"""Constructs a ResNet-152 model.
"""
net = ResNet(Bottleneck, [3, 8, 36, 3], out_dim, 'resnet152')
return net
| 7,827 | 33.333333 | 167 | py |
deep-image-retrieval | deep-image-retrieval-master/dirtorch/nets/backbones/resnext101_features.py | from __future__ import print_function, division, absolute_import
import torch
import torch.nn as nn
from torch.autograd import Variable
from functools import reduce
class LambdaBase(nn.Sequential):
def __init__(self, fn, *args):
super(LambdaBase, self).__init__(*args)
self.lambda_func = fn
def forward_prepare(self, input):
output = []
for module in self._modules.values():
output.append(module(input))
return output if output else input
class Lambda(LambdaBase):
def forward(self, input):
return self.lambda_func(self.forward_prepare(input))
class LambdaMap(LambdaBase):
def forward(self, input):
return list(map(self.lambda_func,self.forward_prepare(input)))
class LambdaReduce(LambdaBase):
def forward(self, input):
return reduce(self.lambda_func,self.forward_prepare(input))
resnext101_32x4d_features = nn.Sequential( # Sequential,
nn.Conv2d(3,64,(7, 7),(2, 2),(3, 3),1,1,bias=False),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d((3, 3),(2, 2),(1, 1)),
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(64,128,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128,128,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
),
nn.Conv2d(128,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
),
nn.Sequential( # Sequential,
nn.Conv2d(64,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
),
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(256,128,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128,128,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
),
nn.Conv2d(128,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(256,128,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128,128,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
),
nn.Conv2d(128,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
),
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(256,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256,256,(3, 3),(2, 2),(1, 1),1,32,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
),
nn.Sequential( # Sequential,
nn.Conv2d(256,512,(1, 1),(2, 2),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
),
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(512,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(512,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(512,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
),
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(512,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(2, 2),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
nn.Sequential( # Sequential,
nn.Conv2d(512,1024,(1, 1),(2, 2),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
),
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(2, 2),(1, 1),1,32,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,2048,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(2048),
),
nn.Sequential( # Sequential,
nn.Conv2d(1024,2048,(1, 1),(2, 2),(0, 0),1,1,bias=False),
nn.BatchNorm2d(2048),
),
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(2048,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,2048,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(2048),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(2048,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,2048,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(2048),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
)
)
resnext101_64x4d_features = nn.Sequential(#Sequential,
nn.Conv2d(3, 64, (7, 7), (2, 2), (3, 3), 1, 1, bias = False),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d((3, 3), (2, 2), (1, 1)),
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(64, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(256),
),
nn.Sequential(#Sequential,
nn.Conv2d(64, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(256),
),
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(256, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(256),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(256, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(256),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
),
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(256, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (2, 2), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(512),
),
nn.Sequential(#Sequential,
nn.Conv2d(256, 512, (1, 1), (2, 2), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(512),
),
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(512, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(512),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(512, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(512),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(512, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(512),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
),
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (2, 2), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
nn.Sequential(#Sequential,
nn.Conv2d(512, 1024, (1, 1), (2, 2), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
),
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 2048, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(2048),
nn.ReLU(),
nn.Conv2d(2048, 2048, (3, 3), (2, 2), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(2048),
nn.ReLU(),
),
nn.Conv2d(2048, 2048, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(2048),
),
nn.Sequential(#Sequential,
nn.Conv2d(1024, 2048, (1, 1), (2, 2), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(2048),
),
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(2048, 2048, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(2048),
nn.ReLU(),
nn.Conv2d(2048, 2048, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(2048),
nn.ReLU(),
),
nn.Conv2d(2048, 2048, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(2048),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap(lambda x: x, #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(2048, 2048, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(2048),
nn.ReLU(),
nn.Conv2d(2048, 2048, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(2048),
nn.ReLU(),
),
nn.Conv2d(2048, 2048, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(2048),
),
Lambda(lambda x: x), #Identity,
),
LambdaReduce(lambda x, y: x + y), #CAddTable,
nn.ReLU(),
),
)
)
| 57,499 | 41.942494 | 91 | py |
deep-image-retrieval | deep-image-retrieval-master/dirtorch/utils/pytorch_loader.py | import pdb
from PIL import Image
import numpy as np
import random
import torch
import torch.utils.data as data
def get_loader( dataset, trf_chain, iscuda,
preprocess = {}, # variables for preprocessing (input_size, mean, std, ...)
output = ('img','label'),
batch_size = None,
threads = 1,
shuffle = True,
balanced = 0, use_all = False,
totensor = True,
**_useless_kw):
''' Get a data loader, given the dataset and some parameters.
Parameters
----------
dataset : Dataset().
Class containing all images and labels.
trf_chain : list
list of transforms
iscuda : bool
output : tuple of str
tells what to return. 'img', 'label', ... See PytorchLoader().
preprocess : dict
{input_size:..., mean=..., std:..., ...}
batch_size : int
threads : int
shuffle : int
balanced : float in [0,1]
if balanced>0, then will pick dataset samples such that each class is equally represented.
use_all : bool
if True, will force to use all dataset samples at least once (even if balanced>0)
Returns
-------
a pytorch loader.
'''
from . import transforms
trf_chain = transforms.create(trf_chain, to_tensor=True, **preprocess)
sampler = None
if balanced:
sampler = BalancedSampler(dataset, use_all=use_all, balanced=balanced)
shuffle = False
loader = PytorchLoader(dataset, transform=trf_chain, output=output)
if threads == 1:
return loader
else:
return data.DataLoader(
loader,
batch_size = batch_size,
shuffle = shuffle,
sampler = sampler,
num_workers = threads,
pin_memory = iscuda)
class PytorchLoader (data.Dataset):
"""A pytorch dataset-loader
Args:
dataset (object): dataset inherited from dataset.Dataset()
transform (deprecated, callable): pytorch transforms. Use img_and_target_transform instead.
target_transform (deprecated, callable): applied on target. Use img_and_target_transform instead.
img_and_target_transform (callable):
applied on dict(img=, label=, bbox=, ...)
and should return a similar dictionary.
Attributes:
dataset (object): subclass of dataset.Dataset()
"""
def __init__(self, dataset, transform=None,
target_transform=None,
img_and_target_transform=None,
output=['img','label']):
self.dataset = dataset
self.transform = transform
self.target_transform = target_transform
self.img_and_target_transform = img_and_target_transform
self.output = output
def __getitem__(self, index):
img_filename = self.dataset.get_filename(index)
img_and_label = dict(
img_filename = img_filename,
img_key = self.dataset.get_key(index),
img = self.dataset.get_image(index),
label = try_to_get(self.dataset.get_label, index, toint=True) )
if self.img_and_target_transform:
# label depends on image (bbox, polygons, etc)
assert self.transform is None
assert self.target_transform is None
# add optional attributes
if 'bbox' in self.output:
bbox = try_to_get(self.dataset.get_bbox, index)
if bbox: img_and_label['bbox'] = bbox
if any(a.endswith('_map') for a in self.output):
original_polygons = try_to_get(self.dataset.get_polygons, index, toint=True)
if original_polygons is not None:
img_and_label['polygons'] = original_polygons
img_and_label = self.img_and_target_transform(img_and_label)
if original_polygons is not None:
transformed_polygons = img_and_label['polygons']
imsize = img_and_label['img'].size
if not isinstance(imsize, tuple):
imsize = imsize()[-2:][::-1] # returns h,w
if 'label_map' in self.output:
pixlabel = self.dataset.get_label_map(index, imsize, polygons=transformed_polygons)
img_and_label['label_map'] = pixlabel.astype(int)
# instance level attributes
for out_key in self.output:
for type in ['_instance_map', '_angle_map']:
if not out_key.endswith(type): continue
cls = out_key[:-len(type)]
get_func = getattr(self.dataset,'get'+type)
pixlabel = get_func(index, cls, imsize, polygons=transformed_polygons)
img_and_label[out_key] = pixlabel
else:
# just plain old transform, no influence on labels
if self.transform is not None:
img_and_label['img'] = self.transform(img_and_label['img'])
if self.target_transform:
img_and_label['label'] = self.target_transform(img_and_label['label'])
for o in self.output:
assert img_and_label.get(o) is not None, "Missing field %s for img %s" % (o,img_filename)
return [img_and_label[o] for o in self.output]
def __len__(self):
return len(self.dataset)
def __repr__(self):
fmt_str = 'Dataset ' + self.dataset.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: %d\n' % len(self.dataset)
fmt_str += ' Root Location: %s\n' % self.dataset.__dict__.get('root','(unknown)')
if self.img_and_target_transform:
tmp = ' Image_and_target transforms: '
fmt_str += '{0}{1}\n'.format(tmp, repr(self.img_and_target_transform).replace('\n', '\n' + ' ' * len(tmp)))
if self.transform:
tmp = ' Image transforms: '
fmt_str += '{0}{1}\n'.format(tmp, repr(self.transform).replace('\n', '\n' + ' ' * len(tmp)))
if self.target_transform:
tmp = ' Target transforms: '
fmt_str += '{0}{1}\n'.format(tmp, repr(self.target_transform).replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
class BalancedSampler (data.sampler.Sampler):
""" Data sampler that will provide an equal number of each class
to the network.
size: float in [0,2]
specify the size increase/decrease w.r.t to the original dataset.
1 means that the over-classes (with more than median n_per_class images)
will have less items, but conversely, under-classes will have more items.
balanced: float in [0,1]
specify whether the balance constraint should be respected firmly or not.
if ==1: balance is exactly respected; if ==0, same as dataset (no change).
use_all: bool
if True, will use all images that a class have, even if it is higher than
what the algorithm wanted to use.
"""
def __init__(self, dataset, size=1.0, balanced=1.0, use_all=False):
assert 0 <= size <= 2
assert 0 <= balanced <= 1
# enumerate class images
self.cls_imgs = [[] for i in range(dataset.nclass)]
for i in range(len(dataset)):
label = dataset.get_label(i, toint=True)
self.cls_imgs[label].append(i)
# decide on the number of example per class
self.npc = np.percentile([len(imgs) for imgs in self.cls_imgs], max(0,min(50*size,100)))
self.balanced = balanced
self.use_all = use_all
self.nelem = int(0.5 + self.npc * dataset.nclass) # initial estimate
def __iter__(self):
indices = []
for i,imgs in enumerate(self.cls_imgs):
np.random.shuffle(imgs) # randomize
# target size for this class
# target = logarithmic mean
b = self.balanced
if len(imgs):
target = 2**(b*np.log2(self.npc) + (1-b)*np.log2(len(imgs)))
target = int(0.5 + target)
else:
target = 0
if self.use_all: # use all images
target = max(target, len(imgs))
# augment classes until target
res = []
while len(res) < target:
res += imgs
res = res[:target] # cut
indices += res
np.random.shuffle(indices)
self.nelem = len(indices)
return iter(indices)
def __len__(self):
return self.nelem
### Helper functions with get_loader() and DatasetLoader()
def load_one_img( loader ):
''' Helper to iterate on get_loader()
loader: output of get_loader()
'''
iterator = iter(loader)
batch = []
while iterator:
if not batch: # refill
things = next(iterator)
batch = list(zip(*[t.numpy() if torch.is_tensor(t) else t for t in things]))
yield batch.pop(0)
def tensor2img(tensor, model):
""" convert a numpy tensor to a PIL Image
(undo the ToTensor() and Normalize() transforms)
"""
mean = model.preprocess['mean']
std = model.preprocess['std']
if not isinstance(tensor, np.ndarray):
if not isinstance(tensor, torch.Tensor):
tensor = tensor.data
tensor = tensor.squeeze().cpu().numpy()
res = np.uint8(np.clip(255*((tensor.transpose(1,2,0) * std) + mean), 0, 255))
from PIL import Image
return Image.fromarray(res)
def test_loader_speed(loader_):
''' Test the speed of a data loader
'''
from tqdm import tqdm
loader = load_one_img(loader_)
for _ in tqdm(loader):
pass
pdb.set_trace()
def try_to_get(func, *args, **kwargs):
try:
return func(*args, **kwargs)
except NotImplementedError:
return None
| 9,903 | 31.686469 | 119 | py |
deep-image-retrieval | deep-image-retrieval-master/dirtorch/utils/common.py | import os
import sys
import pdb
import shutil
from collections import OrderedDict
import numpy as np
import sklearn.decomposition
import torch
import torch.nn.functional as F
try:
import torch
import torch.nn as nn
except ImportError:
pass
def typename(x):
return type(x).__module__
def tonumpy(x):
if typename(x) == torch.__name__:
return x.cpu().numpy()
else:
return x
def matmul(A, B):
if typename(A) == np.__name__:
B = tonumpy(B)
scores = np.dot(A, B.T)
elif typename(B) == torch.__name__:
scores = torch.matmul(A, B.t()).cpu().numpy()
else:
raise TypeError("matrices must be either numpy or torch type")
return scores
def pool(x, pooling='mean', gemp=3):
if len(x) == 1:
return x[0]
x = torch.stack(x, dim=0)
if pooling == 'mean':
return torch.mean(x, dim=0)
elif pooling == 'gem':
def sympow(x, p, eps=1e-6):
s = torch.sign(x)
return (x*s).clamp(min=eps).pow(p) * s
x = sympow(x, gemp)
x = torch.mean(x, dim=0)
return sympow(x, 1/gemp)
else:
raise ValueError("Bad pooling mode: "+str(pooling))
def torch_set_gpu(gpus, seed=None, randomize=True):
if type(gpus) is int:
gpus = [gpus]
assert gpus, 'error: empty gpu list, use --gpu N N ...'
cuda = all(gpu >= 0 for gpu in gpus)
if cuda:
if any(gpu >= 1000 for gpu in gpus):
visible_gpus = [int(gpu) for gpu in os.environ['CUDA_VISIBLE_DEVICES'].split(',')]
os.environ['CUDA_VISIBLE_DEVICES'] = ','.join([str(visible_gpus[gpu-1000]) for gpu in gpus])
else:
os.environ['CUDA_VISIBLE_DEVICES'] = ','.join([str(gpu) for gpu in gpus])
assert cuda and torch.cuda.is_available(), "%s has GPUs %s unavailable" % (
os.environ['HOSTNAME'], os.environ['CUDA_VISIBLE_DEVICES'])
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.fastest = True
print('Launching on GPUs ' + os.environ['CUDA_VISIBLE_DEVICES'])
else:
print('Launching on >> CPU <<')
torch_set_seed(seed, cuda, randomize=randomize)
return cuda
def torch_set_seed(seed, cuda, randomize=True):
if seed:
# this makes it 3x SLOWER but deterministic
torch.backends.cudnn.enabled = False
if randomize and not seed:
import time
try:
seed = int(np.uint32(hash(time.time())))
except OverflowError:
seed = int.from_bytes(os.urandom(4), byteorder='little', signed=False)
if seed:
np.random.seed(seed)
torch.manual_seed(seed)
if cuda:
torch.cuda.manual_seed(seed)
def save_checkpoint(state, is_best, filename):
try:
dirs = os.path.split(filename)[0]
if not os.path.isdir(dirs):
os.makedirs(dirs)
torch.save(state, filename)
if is_best:
filenamebest = filename+'.best'
shutil.copyfile(filename, filenamebest)
filename = filenamebest
print("saving to "+filename)
except:
print("Error: Could not save checkpoint at %s, skipping" % filename)
def load_checkpoint(filename, iscuda=False):
if not filename:
return None
assert os.path.isfile(filename), "=> no checkpoint found at '%s'" % filename
checkpoint = torch.load(filename, map_location=lambda storage, loc: storage)
print("=> loading checkpoint '%s'" % filename, end='')
for key in ['epoch', 'iter', 'current_iter']:
if key in checkpoint:
print(" (%s %d)" % (key, checkpoint[key]), end='')
print()
new_dict = OrderedDict()
for k, v in list(checkpoint['state_dict'].items()):
if k.startswith('module.'):
k = k[7:]
new_dict[k] = v
checkpoint['state_dict'] = new_dict
if iscuda and 'optimizer' in checkpoint:
try:
for state in checkpoint['optimizer']['state'].values():
for k, v in state.items():
if iscuda and torch.is_tensor(v):
state[k] = v.cuda()
except RuntimeError as e:
print("RuntimeError:", e, "(machine %s, GPU %s)" % (
os.environ['HOSTNAME'], os.environ['CUDA_VISIBLE_DEVICES']),
file=sys.stderr)
sys.exit(1)
return checkpoint
def switch_model_to_cuda(model, iscuda=True, checkpoint=None):
if iscuda:
if checkpoint:
checkpoint['state_dict'] = {'module.' + k: v for k, v in checkpoint['state_dict'].items()}
try:
model = torch.nn.DataParallel(model)
# copy attributes automatically
for var in dir(model.module):
if var.startswith('_'):
continue
val = getattr(model.module, var)
if isinstance(val, (bool, int, float, str, dict)) or \
(callable(val) and var.startswith('get_')):
setattr(model, var, val)
model.cuda()
model.isasync = True
except RuntimeError as e:
print("RuntimeError:", e, "(machine %s, GPU %s)" % (
os.environ['HOSTNAME'], os.environ['CUDA_VISIBLE_DEVICES']),
file=sys.stderr)
sys.exit(1)
model.iscuda = iscuda
return model
def model_size(model):
''' Computes the number of parameters of the model
'''
size = 0
for weights in model.state_dict().values():
size += np.prod(weights.shape)
return size
def freeze_batch_norm(model, freeze=True, only_running=False):
model.freeze_bn = bool(freeze)
if not freeze:
return
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
# Eval mode freezes the running mean and std
m.eval()
for param in m.named_parameters():
if only_running:
# Weight and bias can be updated
param[1].requires_grad = True
else:
# Freeze the weight and bias
param[1].requires_grad = False
def variables(inputs, iscuda, not_on_gpu=[]):
''' convert several Tensors to cuda.Variables
Tensor whose index are in not_on_gpu stays on cpu.
'''
inputs_var = []
for i, x in enumerate(inputs):
if i not in not_on_gpu and not isinstance(x, (tuple, list)):
if iscuda:
x = x.cuda(non_blocking=True)
x = torch.autograd.Variable(x)
inputs_var.append(x)
return inputs_var
def transform(pca, X, whitenp=0.5, whitenv=None, whitenm=1.0, use_sklearn=True):
if use_sklearn:
# https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/base.py#L99
if pca.mean_ is not None:
X = X - pca.mean_
X_transformed = np.dot(X, pca.components_[:whitenv].T)
if pca.whiten:
X_transformed /= whitenm * np.power(pca.explained_variance_[:whitenv], whitenp)
else:
X = X - pca['means']
X_transformed = np.dot(X, pca['W'])
return X_transformed
def whiten_features(X, pca, l2norm=True, whitenp=0.5, whitenv=None, whitenm=1.0, use_sklearn=True):
res = transform(pca, X, whitenp=whitenp, whitenv=whitenv, whitenm=whitenm, use_sklearn=use_sklearn)
if l2norm:
res = res / np.expand_dims(np.linalg.norm(res, axis=1), axis=1)
return res
| 7,499 | 30.120332 | 104 | py |
deep-image-retrieval | deep-image-retrieval-master/dirtorch/utils/evaluation.py | '''Evaluation metrics
'''
import pdb
import numpy as np
import torch
def accuracy_topk(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k
output: torch.FloatTensoror np.array(float)
shape = B * L [* H * W]
L: number of possible labels
target: torch.IntTensor or np.array(int)
shape = B [* H * W]
ground-truth labels
"""
if isinstance(output, np.ndarray):
pred = (-output).argsort(axis=1)
target = np.expand_dims(target, axis=1)
correct = (pred == target)
res = []
for k in topk:
correct_k = correct[:, :k].sum()
res.append(correct_k / target.size)
if isinstance(output, torch.Tensor):
_, pred = output.topk(max(topk), 1, True, True)
correct = pred.eq(target.unsqueeze(1))
res = []
for k in topk:
correct_k = correct[:, :k].float().view(-1).sum(0)
res.append(correct_k.mul_(1.0 / target.numel()))
return res
def compute_AP(label, score):
from sklearn.metrics import average_precision_score
return average_precision_score(label, score)
def compute_average_precision(positive_ranks):
"""
Extracted from: https://github.com/tensorflow/models/blob/master/research/delf/delf/python/detect_to_retrieve/dataset.py
Computes average precision according to dataset convention.
It assumes that `positive_ranks` contains the ranks for all expected positive
index images to be retrieved. If `positive_ranks` is empty, returns
`average_precision` = 0.
Note that average precision computation here does NOT use the finite sum
method (see
https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Average_precision)
which is common in information retrieval literature. Instead, the method
implemented here integrates over the precision-recall curve by averaging two
adjacent precision points, then multiplying by the recall step. This is the
convention for the Revisited Oxford/Paris datasets.
Args:
positive_ranks: Sorted 1D NumPy integer array, zero-indexed.
Returns:
average_precision: Float.
"""
average_precision = 0.0
num_expected_positives = len(positive_ranks)
if not num_expected_positives:
return average_precision
recall_step = 1.0 / num_expected_positives
for i, rank in enumerate(positive_ranks):
if not rank:
left_precision = 1.0
else:
left_precision = i / rank
right_precision = (i + 1) / (rank + 1)
average_precision += (left_precision + right_precision) * recall_step / 2
return average_precision
def compute_average_precision_quantized(labels, idx, step=0.01):
recall_checkpoints = np.arange(0, 1, step)
def mymax(x, default):
return np.max(x) if len(x) else default
Nrel = np.sum(labels)
if Nrel == 0:
return 0
recall = np.cumsum(labels[idx])/float(Nrel)
irange = np.arange(1, len(idx)+1)
prec = np.cumsum(labels[idx]).astype(np.float32) / irange
precs = np.array([mymax(prec[np.where(recall > v)], 0) for v in recall_checkpoints])
return np.mean(precs)
def pixelwise_iou(output, target):
""" For each image, for each label, compute the IoU between
"""
assert False
| 3,382 | 31.219048 | 124 | py |
deep-image-retrieval | deep-image-retrieval-master/dirtorch/utils/transforms.py | import pdb
import numpy as np
from PIL import Image, ImageOps
import torchvision.transforms as tvf
import random
from math import ceil
from . import transforms_tools as F
def create(cmd_line, to_tensor=False, **vars):
''' Create a sequence of transformations.
cmd_line: (str)
Comma-separated list of transformations.
Ex: "Rotate(10), Scale(256)"
to_tensor: (bool)
Whether to add the "ToTensor(), Normalize(mean, std)"
automatically to the end of the transformation string
vars: (dict)
dictionary of global variables.
'''
if to_tensor:
if not cmd_line:
cmd_line = "ToTensor(), Normalize(mean=mean, std=std)"
elif to_tensor and 'ToTensor' not in cmd_line:
cmd_line += ", ToTensor(), Normalize(mean=mean, std=std)"
assert isinstance(cmd_line, str)
cmd_line = "tvf.Compose([%s])" % cmd_line
try:
return eval(cmd_line, globals(), vars)
except Exception as e:
raise SyntaxError("Cannot interpret this transform list: %s\nReason: %s" % (cmd_line, e))
class Identity (object):
""" Identity transform. It does nothing!
"""
def __call__(self, inp):
return inp
class Pad(object):
""" Pads the shortest side of the image to a given size
If size is shorter than the shortest image, then the image will be untouched
"""
def __init__(self, size, color=(127,127,127)):
self.size = size
assert len(color) == 3
if not all(isinstance(c,int) for c in color):
color = tuple([int(255*c) for c in color])
self.color = color
def __call__(self, inp):
img = F.grab_img(inp)
w, h = img.size
if w >= h:
newh = max(h,self.size)
neww = w
else:
newh = h
neww = max(w,self.size)
if (neww,newh) != img.size:
img2 = Image.new('RGB', (neww,newh), self.color)
img2.paste(img, ((neww-w)//2,(newh-h)//2) )
img = img2
return F.update_img_and_labels(inp, img, aff=(1,0,0,0,1,0))
class PadSquare (object):
""" Pads the image to a square size
The dimension of the output image will be equal to size x size
If size is None, then the image will be padded to the largest dimension
If size is smaller than the original image size, the image will be cropped
"""
def __init__(self, size=None, color=(127,127,127)):
self.size = size
assert len(color) == 3
if not all(isinstance(c,int) for c in color):
color = tuple([int(255*c) for c in color])
self.color = color
def __call__(self, inp):
img = F.grab_img(inp)
w, h = img.size
s = self.size or max(w, h)
if (s,s) != img.size:
img2 = Image.new('RGB', (s,s), self.color)
img2.paste(img, ((s-w)//2,(s-h)//2) )
img = img2
return F.update_img_and_labels(inp, img, aff=(1,0,0,0,1,0))
class RandomBorder (object):
""" Expands the image with a random size border
"""
def __init__(self, min_size, max_size, color=(127,127,127)):
assert isinstance(min_size, int) and min_size >= 0
assert isinstance(max_size, int) and min_size <= max_size
self.min_size = min_size
self.max_size = max_size
assert len(color) == 3
if not all(isinstance(c,int) for c in color):
color = tuple([int(255*c) for c in color])
self.color = color
def __call__(self, inp):
img = F.grab_img(inp)
bh = random.randint(self.min_size, self.max_size)
bw = random.randint(self.min_size, self.max_size)
img = ImageOps.expand(img, border=(bw,bh,bw,bh), fill=self.color)
return F.update_img_and_labels(inp, img, aff=(1,0,0,0,1,0))
class Scale (object):
""" Rescale the input PIL.Image to a given size.
Same as torchvision.Scale
The smallest dimension of the resulting image will be = size.
if largest == True: same behaviour for the largest dimension.
if not can_upscale: don't upscale
if not can_downscale: don't downscale
"""
def __init__(self, size, interpolation=Image.BILINEAR, largest=False, can_upscale=True, can_downscale=True):
assert isinstance(size, (float,int)) or (len(size) == 2)
self.size = size
if isinstance(self.size, float):
assert 0 < self.size <= 4, 'bad float self.size, cannot be outside of range ]0,4]'
self.interpolation = interpolation
self.largest = largest
self.can_upscale = can_upscale
self.can_downscale = can_downscale
def get_params(self, imsize):
w,h = imsize
if isinstance(self.size, int):
is_smaller = lambda a,b: (a>=b) if self.largest else (a<=b)
if (is_smaller(w, h) and w == self.size) or (is_smaller(h, w) and h == self.size):
ow, oh = w, h
elif is_smaller(w, h):
ow = self.size
oh = int(0.5 + self.size * h / w)
else:
oh = self.size
ow = int(0.5 + self.size * w / h)
elif isinstance(self.size, float):
ow, oh = int(0.5 + self.size*w), int(0.5 + self.size*h)
else: # tuple of ints
ow, oh = self.size
return ow, oh
def __call__(self, inp):
img = F.grab_img(inp)
w, h = img.size
size2 = ow,oh = self.get_params(img.size)
if size2 != img.size:
a1, a2 = img.size, size2
if (self.can_upscale and min(a1) < min(a2)) or (self.can_downscale and min(a1) > min(a2)):
img = img.resize(size2, self.interpolation)
return F.update_img_and_labels(inp, img, aff=(ow/w,0,0,0,oh/h,0))
class RandomScale (Scale):
"""Rescale the input PIL.Image to a random size.
Args:
min_size (int): min size of the smaller edge of the picture.
max_size (int): max size of the smaller edge of the picture.
ar (float or tuple):
max change of aspect ratio (width/height).
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
"""
def __init__(self, min_size, max_size, ar=1, can_upscale=False, can_downscale=True, interpolation=Image.BILINEAR, largest=False):
Scale.__init__(self, 0, can_upscale=can_upscale, can_downscale=can_downscale, interpolation=interpolation, largest=largest)
assert isinstance(min_size, int) and min_size >= 1
assert isinstance(max_size, int) and min_size <= max_size
self.min_size = min_size
self.max_size = max_size
if type(ar) in (float,int): ar = (min(1/ar,ar),max(1/ar,ar))
assert 0.2 < ar[0] <= ar[1] < 5
self.ar = ar
self.largest = largest
def get_params(self, imsize):
w,h = imsize
if self.can_upscale:
max_size = self.max_size
else:
max_size = min(self.max_size,min(w,h))
size = max(min(int(0.5 + F.rand_log_uniform(self.min_size,self.max_size)), self.max_size), self.min_size)
ar = F.rand_log_uniform(*self.ar) # change of aspect ratio
if not self.largest:
if w < h : # image is taller
ow = size
oh = int(0.5 + size * h / w / ar)
if oh < self.min_size:
ow,oh = int(0.5 + ow*float(self.min_size)/oh),self.min_size
else: # image is wider
oh = size
ow = int(0.5 + size * w / h * ar)
if ow < self.min_size:
ow,oh = self.min_size,int(0.5 + oh*float(self.min_size)/ow)
assert ow >= self.min_size
assert oh >= self.min_size
else: # if self.largest
if w > h: # image is wider
ow = size
oh = int(0.5 + size * h / w / ar)
else: # image is taller
oh = size
ow = int(0.5 + size * w / h * ar)
assert ow <= self.max_size
assert oh <= self.max_size
return ow, oh
class RandomCrop (object):
"""Crop the given PIL Image at a random location.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
padding (int or sequence, optional): Optional padding on each border
of the image. Default is 0, i.e no padding. If a sequence of length
4 is provided, it is used to pad left, top, right, bottom borders
respectively.
"""
def __init__(self, size, padding=0):
if isinstance(size, int):
self.size = (int(size), int(size))
else:
self.size = size
self.padding = padding
@staticmethod
def get_params(img, output_size):
w, h = img.size
th, tw = output_size
assert h >= th and w >= tw, "Image of %dx%d is too small for crop %dx%d" % (w,h,tw,th)
y = np.random.randint(0, h - th) if h > th else 0
x = np.random.randint(0, w - tw) if w > tw else 0
return x, y, tw, th
def __call__(self, inp):
img = F.grab_img(inp)
padl = padt = 0
if self.padding > 0:
if F.is_pil_image(img):
img = ImageOps.expand(img, border=self.padding, fill=0)
else:
assert isinstance(img, F.DummyImg)
img = img.expand(border=self.padding)
if isinstance(self.padding, int):
padl = padt = self.padding
else:
padl, padt = self.padding[0:2]
i, j, tw, th = self.get_params(img, self.size)
img = img.crop((i, j, i+tw, j+th))
return F.update_img_and_labels(inp, img, aff=(1,0,padl-i,0,1,padt-j))
class CenterCrop (RandomCrop):
"""Crops the given PIL Image at the center.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
"""
@staticmethod
def get_params(img, output_size):
w, h = img.size
th, tw = output_size
y = int(0.5 +((h - th) / 2.))
x = int(0.5 +((w - tw) / 2.))
return x, y, tw, th
class CropToBbox(object):
""" Crop the image according to the bounding box.
margin (float):
ensure a margin around the bbox equal to (margin * min(bbWidth,bbHeight))
min_size (int):
result cannot be smaller than this size
"""
def __init__(self, margin=0.5, min_size=0):
self.margin = margin
self.min_size = min_size
def __call__(self, inp):
img = F.grab_img(inp)
w, h = img.size
assert min(w,h) >= self.min_size
x0,y0,x1,y1 = inp['bbox']
assert x0 < x1 and y0 < y1, pdb.set_trace()
bbw, bbh = x1-x0, y1-y0
margin = int(0.5 + self.margin * min(bbw, bbh))
i = max(0, x0 - margin)
j = max(0, y0 - margin)
w = min(w, x1 + margin) - i
h = min(h, y1 + margin) - j
if w < self.min_size:
i = max(0, i-(self.min_size-w)//2)
w = self.min_size
if h < self.min_size:
j = max(0, j-(self.min_size-h)//2)
h = self.min_size
img = img.crop((i,j,i+w,j+h))
return F.update_img_and_labels(inp, img, aff=(1,0,-i,0,1,-j))
class RandomRotation(object):
"""Rescale the input PIL.Image to a random size.
Args:
degrees (float):
rotation angle.
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
"""
def __init__(self, degrees, interpolation=Image.BILINEAR):
self.degrees = degrees
self.interpolation = interpolation
def __call__(self, inp):
img = F.grab_img(inp)
w, h = img.size
angle = np.random.uniform(-self.degrees, self.degrees)
img = img.rotate(angle, resample=self.interpolation)
w2, h2 = img.size
aff = F.aff_translate(-w/2,-h/2)
aff = F.aff_mul(aff, F.aff_rotate(-angle * np.pi/180))
aff = F.aff_mul(aff, F.aff_translate(w2/2,h2/2))
return F.update_img_and_labels(inp, img, aff=aff)
class RandomFlip (object):
"""Randomly flip the image.
"""
def __call__(self, inp):
img = F.grab_img(inp)
w, h = img.size
flip = np.random.rand() < 0.5
if flip:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
return F.update_img_and_labels(inp, img, aff=(-1,0,w-1,0,1,0))
class RandomTilting(object):
"""Apply a random tilting (left, right, up, down) to the input PIL.Image
Args:
maginitude (float):
maximum magnitude of the random skew (value between 0 and 1)
directions (string):
tilting directions allowed (all, left, right, up, down)
examples: "all", "left,right", "up-down-right"
"""
def __init__(self, magnitude, directions='all'):
self.magnitude = magnitude
self.directions = directions.lower().replace(',',' ').replace('-',' ')
def __call__(self, inp):
img = F.grab_img(inp)
w, h = img.size
x1,y1,x2,y2 = 0,0,h,w
original_plane = [(y1, x1), (y2, x1), (y2, x2), (y1, x2)]
max_skew_amount = max(w, h)
max_skew_amount = int(ceil(max_skew_amount * self.magnitude))
skew_amount = random.randint(1, max_skew_amount)
if self.directions == 'all':
choices = [0,1,2,3]
else:
dirs = ['left', 'right', 'up', 'down']
choices = []
for d in self.directions.split():
try:
choices.append(dirs.index(d))
except:
raise ValueError('Tilting direction %s not recognized' % d)
skew_direction = random.choice(choices)
if skew_direction == 0:
# Left Tilt
new_plane = [(y1, x1 - skew_amount), # Top Left
(y2, x1), # Top Right
(y2, x2), # Bottom Right
(y1, x2 + skew_amount)] # Bottom Left
elif skew_direction == 1:
# Right Tilt
new_plane = [(y1, x1), # Top Left
(y2, x1 - skew_amount), # Top Right
(y2, x2 + skew_amount), # Bottom Right
(y1, x2)] # Bottom Left
elif skew_direction == 2:
# Forward Tilt
new_plane = [(y1 - skew_amount, x1), # Top Left
(y2 + skew_amount, x1), # Top Right
(y2, x2), # Bottom Right
(y1, x2)] # Bottom Left
elif skew_direction == 3:
# Backward Tilt
new_plane = [(y1, x1), # Top Left
(y2, x1), # Top Right
(y2 + skew_amount, x2), # Bottom Right
(y1 - skew_amount, x2)] # Bottom Left
# To calculate the coefficients required by PIL for the perspective skew,
# see the following Stack Overflow discussion: https://goo.gl/sSgJdj
matrix = []
for p1, p2 in zip(new_plane, original_plane):
matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])
A = np.matrix(matrix, dtype=np.float)
B = np.array(original_plane).reshape(8)
homography = np.dot(np.linalg.pinv(A), B)
homography = tuple(np.array(homography).reshape(8))
img = img.transform(img.size, Image.PERSPECTIVE, homography, resample=Image.BICUBIC)
homography = np.linalg.pinv(np.float32(homography+(1,)).reshape(3,3)).ravel()[:8]
return F.update_img_and_labels(inp, img, persp=tuple(homography))
class StillTransform (object):
""" Takes and return an image, without changing its shape or geometry.
"""
def _transform(self, img):
raise NotImplementedError()
def __call__(self, inp):
img = F.grab_img(inp)
# transform the image (size should not change)
img = self._transform(img)
return F.update_img_and_labels(inp, img, aff=(1,0,0,0,1,0))
class ColorJitter (StillTransform):
"""Randomly change the brightness, contrast and saturation of an image.
Args:
brightness (float): How much to jitter brightness. brightness_factor
is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
contrast (float): How much to jitter contrast. contrast_factor
is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
saturation (float): How much to jitter saturation. saturation_factor
is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
hue(float): How much to jitter hue. hue_factor is chosen uniformly from
[-hue, hue]. Should be >=0 and <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
@staticmethod
def get_params(brightness, contrast, saturation, hue):
"""Get a randomized transform to be applied on image.
Arguments are same as that of __init__.
Returns:
Transform which randomly adjusts brightness, contrast and
saturation in a random order.
"""
transforms = []
if brightness > 0:
brightness_factor = np.random.uniform(max(0, 1 - brightness), 1 + brightness)
transforms.append(tvf.Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))
if contrast > 0:
contrast_factor = np.random.uniform(max(0, 1 - contrast), 1 + contrast)
transforms.append(tvf.Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))
if saturation > 0:
saturation_factor = np.random.uniform(max(0, 1 - saturation), 1 + saturation)
transforms.append(tvf.Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))
if hue > 0:
hue_factor = np.random.uniform(-hue, hue)
transforms.append(tvf.Lambda(lambda img: F.adjust_hue(img, hue_factor)))
np.random.shuffle(transforms)
transform = tvf.Compose(transforms)
return transform
def _transform(self, img):
transform = self.get_params(self.brightness, self.contrast, self.saturation, self.hue)
return transform(img)
class RandomErasing (StillTransform):
"""
Class that performs Random Erasing, an augmentation technique described
in `https://arxiv.org/abs/1708.04896 <https://arxiv.org/abs/1708.04896>`_
by Zhong et al. To quote the authors, random erasing:
"*... randomly selects a rectangle region in an image, and erases its
pixels with random values.*"
The size of the random rectangle is controlled using the
:attr:`area` parameter. This area is random in its
width and height.
Args:
area: The percentage area of the image to occlude.
"""
def __init__(self, area):
self.area = area
def _transform(self, image):
"""
Adds a random noise rectangle to a random area of the passed image,
returning the original image with this rectangle superimposed.
:param image: The image to add a random noise rectangle to.
:type image: PIL.Image
:return: The image with the superimposed random rectangle as type
image PIL.Image
"""
w, h = image.size
w_occlusion_max = int(w * self.area)
h_occlusion_max = int(h * self.area)
w_occlusion_min = int(w * self.area/2)
h_occlusion_min = int(h * self.area/2)
if not (w_occlusion_min < w_occlusion_max and h_occlusion_min < h_occlusion_max):
return image
w_occlusion = np.random.randint(w_occlusion_min, w_occlusion_max)
h_occlusion = np.random.randint(h_occlusion_min, h_occlusion_max)
if len(image.getbands()) == 1:
rectangle = Image.fromarray(np.uint8(np.random.rand(w_occlusion, h_occlusion) * 255))
else:
rectangle = Image.fromarray(np.uint8(np.random.rand(w_occlusion, h_occlusion, len(image.getbands())) * 255))
assert w > w_occlusion and h > h_occlusion, pdb.set_trace()
random_position_x = np.random.randint(0, w - w_occlusion)
random_position_y = np.random.randint(0, h - h_occlusion)
image = image.copy() # don't modify the original
image.paste(rectangle, (random_position_x, random_position_y))
return image
class ToTensor (StillTransform, tvf.ToTensor):
def _transform(self, img):
return tvf.ToTensor.__call__(self, img)
class Normalize (StillTransform, tvf.Normalize):
def _transform(self, img):
return tvf.Normalize.__call__(self, img)
class BBoxToPixelLabel (object):
""" Convert a bbox into per-pixel label
"""
def __init__(self, nclass, downsize, mode):
self.nclass = nclass
self.downsize = downsize
self.mode = mode
self.nbin = 5
self.log_scale = 1.5
self.ref_scale = 8.0
def __call__(self, inp):
assert isinstance(inp, dict)
w, h = inp['img'].size
ds = self.downsize
assert w % ds == 0
assert h % ds == 0
x0,y0,x1,y1 = inp['bbox']
inp['bbox'] = np.int64(inp['bbox'])
ll = x0/ds
rr = (x1-1)/ds
tt = y0/ds
bb = (y1-1)/ds
l = max(0, int(ll))
r = min(w//ds, 1+int(rr))
t = max(0, int(tt))
b = min(h//ds, 1+int(bb))
inp['bbox_downscaled'] = np.array((l,t,r,b), dtype=np.int64)
W, H = w//ds, h//ds
res = np.zeros((H,W), dtype=np.int64)
res[:] = self.nclass # last bin is null class
res[t:b, l:r] = inp['label']
inp['pix_label'] = res
if self.mode == 'hough':
# compute hough parameters
topos = lambda left, pos, right: np.floor( self.nbin * (pos - left) / (right - left) )
def tolog(size):
size = max(size,1e-8) # make it positive
return np.round( np.log(size / self.ref_scale) / np.log(self.log_scale) + (self.nbin-1)/2 )
# for each pixel, find its x and y position
yc,xc = np.mgrid[0:H, 0:W]
res = -np.ones((4, H, W), dtype=np.int64)
res[0] = topos(ll, xc, rr)
res[1] = topos(tt, yc, bb)
res[2] = tolog(rr - ll)
res[3] = tolog(bb - tt)
res = np.clip(res, 0, self.nbin-1)
inp['pix_bbox_hough'] = res
elif self.mode == 'regr':
topos = lambda left, pos, right: (pos - left) / (right - left)
def tolog(size):
size = max(size,1) # make it positive
return np.log(size / self.ref_scale) / np.log(self.log_scale)
# for each pixel, find its x and y position
yc,xc = np.float64(np.mgrid[0:H, 0:W]) + 0.5
res = -np.ones((4, H, W), dtype=np.float32)
res[0] = topos(ll, xc, rr)
res[1] = topos(tt, yc, bb)
res[2] = tolog(rr - ll)
res[3] = tolog(bb - tt)
inp['pix_bbox_regr'] = res
else:
raise NotImplementedError()
return inp
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser("Script to try out and visualize transformations")
parser.add_argument('--img', type=str, default='$HERE/test.png', help='input image')
parser.add_argument('--trfs', type=str, required=True, help='sequence of transformations')
parser.add_argument('--bbox', action='store_true', help='add a bounding box')
parser.add_argument('--polygons', action='store_true', help='add a polygon')
parser.add_argument('--input_size', type=int, default=224, help='optional param')
parser.add_argument('--layout', type=int, nargs=2, default=(3,3), help='Number of rows and columns')
args = parser.parse_args()
import os
args.img = args.img.replace('$HERE',os.path.dirname(__file__))
img = Image.open(args.img)
if args.bbox or args.polygons:
img = dict(img=img)
if args.bbox:
w, h = img['img'].size
img['bbox'] = (w/4,h/4,3*w/4,3*h/4)
if args.polygons:
w, h = img['img'].size
img['polygons'] = [(1,[(w/4,h/4),(w/2,h/4),(w/4,h/2)])]
trfs = create(args.trfs, input_size=args.input_size)
from matplotlib import pyplot as pl
pl.ion()
pl.subplots_adjust(0,0,1,1)
nr,nc = args.layout
while True:
for j in range(nr):
for i in range(nc):
pl.subplot(nr,nc,i+j*nc+1)
if i==j==0:
img2 = img
else:
img2 = trfs(img.copy())
if isinstance(img2, dict):
if 'bbox' in img2:
l,t,r,b = img2['bbox']
x,y = [l,r,r,l,l], [t,t,b,b,t]
pl.plot(x,y,'--',lw=5)
if 'polygons' in img2:
for label, pts in img2['polygons']:
x,y = zip(*pts)
pl.plot(x,y,'-',lw=5)
img2 = img2['img']
pl.imshow(img2)
pl.xlabel("%d x %d" % img2.size)
pl.xticks(())
pl.yticks(())
pdb.set_trace()
| 25,933 | 32.945026 | 133 | py |
NeuralSpeech | NeuralSpeech-master/PriorGrad-vocoder/inference.py | # Copyright 2022 (c) Microsoft Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Copyright 2020 LMNT, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import os, sys
import torch
import torchaudio
from tqdm import tqdm
from pathlib import Path
from dataset import from_path_valid as dataset_from_path_valid
from argparse import ArgumentParser
from model import PriorGrad
from learner import _nested_map
device = torch.device("cuda")
def load_state_dict(model, state_dict):
if hasattr(model, 'module') and isinstance(model.module, torch.nn.Module):
model.module.load_state_dict(state_dict['model'])
else:
model.load_state_dict(state_dict['model'])
step = state_dict['step']
return model, step
def restore_from_checkpoint(model, model_dir, step, filename='weights'):
try:
checkpoint = torch.load(f'{model_dir}/{filename}-{step}.pt')
model, step = load_state_dict(model, checkpoint)
print("Loaded {}".format(f'{model_dir}/{filename}-{step}.pt'))
return model, step
except FileNotFoundError:
print("Trying to load {}...".format(f'{model_dir}/{filename}.pt'))
checkpoint = torch.load(f'{model_dir}/{filename}.pt')
model, step = load_state_dict(model, checkpoint)
print("Loaded {} from {} step checkpoint".format(f'{model_dir}/{filename}.pt', step))
return model, step
def predict(model, spectrogram, target_std, global_cond=None, fast_sampling=True):
with torch.no_grad():
# Change in notation from the Diffwave paper for fast sampling.
# DiffWave paper -> Implementation below
# --------------------------------------
# alpha -> talpha
# beta -> training_noise_schedule
# gamma -> alpha
# eta -> beta
training_noise_schedule = np.array(model.params.noise_schedule)
inference_noise_schedule = np.array(
model.params.inference_noise_schedule) if fast_sampling else training_noise_schedule
talpha = 1 - training_noise_schedule
talpha_cum = np.cumprod(talpha)
beta = inference_noise_schedule
alpha = 1 - beta
alpha_cum = np.cumprod(alpha)
T = []
for s in range(len(inference_noise_schedule)):
for t in range(len(training_noise_schedule) - 1):
if talpha_cum[t + 1] <= alpha_cum[s] <= talpha_cum[t]:
twiddle = (talpha_cum[t] ** 0.5 - alpha_cum[s] ** 0.5) / (
talpha_cum[t] ** 0.5 - talpha_cum[t + 1] ** 0.5)
T.append(t + twiddle)
break
T = np.array(T, dtype=np.float32)
# Expand rank 2 tensors by adding a batch dimension.
if len(spectrogram.shape) == 2:
spectrogram = spectrogram.unsqueeze(0)
spectrogram = spectrogram.to(device)
audio = torch.randn(spectrogram.shape[0], model.params.hop_samples * spectrogram.shape[-1],
device=device) * target_std
noise_scale = torch.from_numpy(alpha_cum ** 0.5).float().unsqueeze(1).to(device)
for n in range(len(alpha) - 1, -1, -1):
c1 = 1 / alpha[n] ** 0.5
c2 = beta[n] / (1 - alpha_cum[n]) ** 0.5
audio = c1 * (audio - c2 * model(audio, spectrogram, torch.tensor([T[n]], device=audio.device),
global_cond).squeeze(1))
if n > 0:
noise = torch.randn_like(audio) * target_std
sigma = ((1.0 - alpha_cum[n - 1]) / (1.0 - alpha_cum[n]) * beta[n]) ** 0.5
audio += sigma * noise
audio = torch.clamp(audio, -1.0, 1.0)
return audio
def main(args):
# load saved params_saved.py in model_dir
sys.path.append(os.path.join(args.model_dir))
# load the saved parameters of the model from "params_saved.py"
import params_saved
params = params_saved.params
# override noise_schedule param for additional tests
T_OVERRIDE = args.fast_iter
if args.fast:
if T_OVERRIDE is not None:
if T_OVERRIDE == 6:
NOISE_OVERRIDE = [0.0001, 0.001, 0.01, 0.05, 0.2, 0.5]
elif T_OVERRIDE == 12:
NOISE_OVERRIDE = [0.0001, 0.0005, 0.0008, 0.001, 0.005, 0.008, 0.01, 0.05, 0.08, 0.1, 0.2, 0.5]
elif T_OVERRIDE == 50:
NOISE_OVERRIDE = np.linspace(1e-4, 0.05, T_OVERRIDE).tolist()
else:
NOISE_OVERRIDE = np.linspace(1e-4, 0.05, T_OVERRIDE).tolist()
print(
"WARNING: --fast_iter other than [6, 12] is given. Using linear beta schedule: performance is expected to be WORSE!")
params.inference_noise_schedule = NOISE_OVERRIDE
print("INFO: inference noise schedule updated, fast_iter {} value {}".format(
len(params.inference_noise_schedule), params.inference_noise_schedule))
else:
T_OVERRIDE = len(params.inference_noise_schedule)
dataset_test = dataset_from_path_valid(args.data_root, args.filelist, params)
model = PriorGrad(params)
model, step = restore_from_checkpoint(model, args.model_dir, args.step)
model = model.to(device)
model.eval()
dir_parent = Path(args.model_dir).parent
dir_base = os.path.basename(args.model_dir)
if args.fast:
sample_path = os.path.join(dir_parent, 'sample_fast',
dir_base + '_step{}_fast_iter{}'.format(step, T_OVERRIDE))
else:
sample_path = os.path.join(dir_parent, 'sample_slow', dir_base + '_step{}'.format(step))
os.makedirs(sample_path, exist_ok=True)
# do test set inference for given checkpoint and exit
for i, features in tqdm(enumerate(dataset_test)):
features = _nested_map(features, lambda x: x.to(device) if isinstance(x, torch.Tensor) else x)
with torch.no_grad():
audio_gt = features['audio']
spectrogram = features['spectrogram']
target_std = features['target_std']
if params.condition_prior:
target_std_specdim = target_std[:, ::params.hop_samples].unsqueeze(1)
spectrogram = torch.cat([spectrogram, target_std_specdim], dim=1)
global_cond = None
elif params.condition_prior_global:
target_std_specdim = target_std[:, ::params.hop_samples].unsqueeze(1)
global_cond = target_std_specdim
else:
global_cond = None
audio = predict(model, spectrogram, target_std, global_cond=global_cond, fast_sampling=args.fast)
sample_name = "{:04d}.wav".format(i + 1)
torchaudio.save(os.path.join(sample_path, sample_name), audio.cpu(), sample_rate=model.params.sample_rate)
if __name__ == '__main__':
parser = ArgumentParser(description='runs inference from the test set filelist')
parser.add_argument('model_dir',
help='directory containing a trained model (or full path to weights.pt file)')
parser.add_argument('data_root',
help='root of the dataset. used to save the statistics for PriorGrad.'
'example: for LJSpeech, specify /path/to/your/LJSpeech-1.1')
parser.add_argument('filelist',
help='text file containing data path.'
'example: for LJSpeech, refer to ./filelists/test.txt')
parser.add_argument('--step', type=int, default=None,
help='number of training step checkpoint to load.'
'If not provided, tries to load the symlinked weights.pt')
parser.add_argument('--fast', '-f', action='store_true', default=False,
help='fast sampling procedure')
parser.add_argument('--fast_iter', '-t', type=int, default=None,
help='number of fast inference diffusion steps for sampling.'
'6, 12, and 50 steps are officially supported. If other value is provided, linear beta schedule is used.')
main(parser.parse_args())
| 9,379 | 44.533981 | 137 | py |
NeuralSpeech | NeuralSpeech-master/PriorGrad-vocoder/__main__.py | # Copyright 2022 (c) Microsoft Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Copyright 2020 LMNT, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from argparse import ArgumentParser
from torch.cuda import device_count
from torch.multiprocessing import spawn
from learner import train, train_distributed
from params import params
import os, shutil
def _get_free_port():
import socketserver
with socketserver.TCPServer(('localhost', 0), None) as s:
return s.server_address[1]
def main(args):
print('Dumping hyperparameter file...')
os.makedirs(args.model_dir, exist_ok=True)
shutil.copy('params.py', os.path.join(args.model_dir, 'params_saved.py'))
replica_count = device_count()
if replica_count > 1:
if params.batch_size % replica_count != 0:
raise ValueError(f'Batch size {params.batch_size} is not evenly divisble by # GPUs {replica_count}.')
params.batch_size = params.batch_size // replica_count
port = _get_free_port()
spawn(train_distributed, args=(replica_count, port, args, params), nprocs=replica_count, join=True)
else:
train(args, params)
if __name__ == '__main__':
parser = ArgumentParser(description='train (or resume training) a PriorGrad model')
parser.add_argument('model_dir',
help='directory in which to store model checkpoints and training logs')
parser.add_argument('data_root',
help='root of the dataset. used to save the statistics for PriorGrad.'
'example: for LJSpeech, specify /path/to/your/LJSpeech-1.1')
parser.add_argument('filelist',
help='text file containing data path.'
'example: for LJSpeech, refer to ./filelists/train.txt')
parser.add_argument('--max_steps', default=None, type=int,
help='maximum number of training steps')
parser.add_argument('--fp16', action='store_true', default=False,
help='use 16-bit floating point operations for training')
main(parser.parse_args())
| 3,151 | 41.026667 | 107 | py |
NeuralSpeech | NeuralSpeech-master/PriorGrad-vocoder/learner.py | # Copyright 2022 (c) Microsoft Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Copyright 2020 LMNT, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import os
import torch
import torch.nn as nn
from pathlib import Path
from torch.nn.parallel import DistributedDataParallel
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from dataset import from_path as dataset_from_path
from dataset import from_path_valid as dataset_from_path_valid
from model import PriorGrad
from preprocess import get_mel
def _nested_map(struct, map_fn):
if isinstance(struct, tuple):
return tuple(_nested_map(x, map_fn) for x in struct)
if isinstance(struct, list):
return [_nested_map(x, map_fn) for x in struct]
if isinstance(struct, dict):
return {k: _nested_map(v, map_fn) for k, v in struct.items()}
return map_fn(struct)
def scaled_mse_loss(decoder_output, target, target_std):
# inverse of diagonal matrix is 1/x for each element
sigma_inv = torch.reciprocal(target_std)
mse_loss = (((decoder_output - target) * sigma_inv) ** 2)
mse_loss = (mse_loss).sum() / torch.numel(decoder_output)
return mse_loss
class PriorGradLearner:
def __init__(self, model_dir, model, dataset, dataset_val, optimizer, params, *args, **kwargs):
os.makedirs(model_dir, exist_ok=True)
self.model_dir = model_dir
self.model = model
self.dataset = dataset
self.dataset_val = dataset_val
self.optimizer = optimizer
self.params = params
self.autocast = torch.cuda.amp.autocast(enabled=kwargs.get('fp16', False))
self.scaler = torch.cuda.amp.GradScaler(enabled=kwargs.get('fp16', False))
self.step = 0
self.is_master = True
self.use_l2loss = params.use_l2loss
self.use_prior = params.use_prior
self.condition_prior = params.condition_prior
self.condition_prior_global = params.condition_prior_global
assert not (self.condition_prior and self.condition_prior_global),\
"use only one of the following parameter: condition_prior or condition_prior_global"
beta = np.array(self.params.noise_schedule)
noise_level = np.cumprod(1 - beta)
self.noise_level = torch.tensor(noise_level.astype(np.float32))
self.summary_writer = None
def state_dict(self):
if hasattr(self.model, 'module') and isinstance(self.model.module, nn.Module):
model_state = self.model.module.state_dict()
else:
model_state = self.model.state_dict()
return {
'step': self.step,
'model': {k: v.cpu() if isinstance(v, torch.Tensor) else v for k, v in model_state.items()},
'optimizer': {k: v.cpu() if isinstance(v, torch.Tensor) else v for k, v in
self.optimizer.state_dict().items()},
'params': dict(self.params),
'scaler': self.scaler.state_dict(),
}
def load_state_dict(self, state_dict):
if hasattr(self.model, 'module') and isinstance(self.model.module, nn.Module):
self.model.module.load_state_dict(state_dict['model'])
else:
self.model.load_state_dict(state_dict['model'])
self.optimizer.load_state_dict(state_dict['optimizer'])
self.scaler.load_state_dict(state_dict['scaler'])
self.step = state_dict['step']
def save_to_checkpoint(self, filename='weights'):
save_basename = f'{filename}-{self.step}.pt'
save_name = f'{self.model_dir}/{save_basename}'
link_name = f'{self.model_dir}/{filename}.pt'
torch.save(self.state_dict(), save_name)
if os.name == 'nt':
torch.save(self.state_dict(), link_name)
else:
if os.path.islink(link_name):
os.unlink(link_name)
os.symlink(save_basename, link_name)
def restore_from_checkpoint(self, filename='weights'):
try:
checkpoint = torch.load(f'{self.model_dir}/{filename}.pt')
self.load_state_dict(checkpoint)
return True
except FileNotFoundError:
return False
def train(self, max_steps=None):
device = next(self.model.parameters()).device
while True:
for features in tqdm(self.dataset,
desc=f'Epoch {self.step // len(self.dataset)}') if self.is_master else self.dataset:
if max_steps is not None and self.step > max_steps:
return
features = _nested_map(features, lambda x: x.to(device) if isinstance(x, torch.Tensor) else x)
loss, predicted = self.train_step(features)
if torch.isnan(loss).any():
raise RuntimeError(f'Detected NaN loss at step {self.step}.')
if self.is_master:
if self.step % 50 == 0:
self._write_summary(self.step, features, loss)
if self.step % 10000 == 0:
self.run_valid_loop()
if self.step % 50000 == 0:
print("INFO: saving checkpoint at step {}".format(self.step))
self.save_to_checkpoint()
self.step += 1
def train_step(self, features):
for param in self.model.parameters():
param.grad = None
audio = features['audio']
spectrogram = features['spectrogram']
target_std = features['target_std']
if self.condition_prior:
target_std_specdim = target_std[:, ::self.params.hop_samples].unsqueeze(1)
spectrogram = torch.cat([spectrogram, target_std_specdim], dim=1)
global_cond = None
elif self.condition_prior_global:
target_std_specdim = target_std[:, ::self.params.hop_samples].unsqueeze(1)
global_cond = target_std_specdim
else:
global_cond = None
N, T = audio.shape
device = audio.device
self.noise_level = self.noise_level.to(device)
with self.autocast:
t = torch.randint(0, len(self.params.noise_schedule), [N], device=audio.device)
noise_scale = self.noise_level[t].unsqueeze(1)
noise_scale_sqrt = noise_scale ** 0.5
noise = torch.randn_like(audio)
noise = noise * target_std
noisy_audio = noise_scale_sqrt * audio + (1.0 - noise_scale) ** 0.5 * noise
predicted = self.model(noisy_audio, spectrogram, t, global_cond)
if self.use_prior:
if self.use_l2loss:
loss = scaled_mse_loss(predicted.squeeze(1), noise, target_std)
else:
raise NotImplementedError
else:
if self.use_l2loss:
loss = nn.MSELoss()(noise, predicted.squeeze(1))
else:
loss = nn.L1Loss()(noise, predicted.squeeze(1))
self.scaler.scale(loss).backward()
self.scaler.unscale_(self.optimizer)
self.grad_norm = nn.utils.clip_grad_norm_(self.model.parameters(), self.params.max_grad_norm or 1e9)
self.scaler.step(self.optimizer)
self.scaler.update()
return loss, predicted
def run_valid_loop(self):
with torch.no_grad():
device = next(self.model.parameters()).device
losses = []
losses_l1 = []
audio_preds = []
for features in tqdm(self.dataset_val,
desc=f'Valid {len(self.dataset_val)}') if self.is_master else self.dataset_val:
features = _nested_map(features, lambda x: x.to(device) if isinstance(x, torch.Tensor) else x)
audio = features['audio']
spectrogram = features['spectrogram']
target_std = features['target_std']
if self.condition_prior:
target_std_specdim = target_std[:, ::self.params.hop_samples].unsqueeze(1)
spectrogram = torch.cat([spectrogram, target_std_specdim], dim=1)
global_cond = None
elif self.condition_prior_global:
target_std_specdim = target_std[:, ::self.params.hop_samples].unsqueeze(1)
global_cond = target_std_specdim
else:
global_cond = None
N, T = audio.shape
device = audio.device
self.noise_level = self.noise_level.to(device)
t = torch.randint(0, len(self.params.noise_schedule), [N], device=audio.device)
noise_scale = self.noise_level[t].unsqueeze(1)
noise_scale_sqrt = noise_scale ** 0.5
noise = torch.randn_like(audio)
noise = noise * target_std
noisy_audio = noise_scale_sqrt * audio + (1.0 - noise_scale) ** 0.5 * noise
if hasattr(self.model, 'module'):
predicted = self.model.module(noisy_audio, spectrogram, t, global_cond)
else:
predicted = self.model(noisy_audio, spectrogram, t, global_cond)
if self.use_prior:
if self.use_l2loss:
loss = scaled_mse_loss(predicted.squeeze(1), noise, target_std)
else:
raise NotImplementedError
else:
if self.use_l2loss:
loss = nn.MSELoss()(noise, predicted.squeeze(1))
else:
loss = nn.L1Loss()(noise, predicted.squeeze(1))
losses.append(loss.cpu().numpy())
audio_pred = self.predict(spectrogram, target_std, global_cond)
audio_preds.append(audio_pred.cpu().numpy())
loss_l1 = torch.nn.L1Loss()(get_mel(audio_pred.squeeze(0), self.params), spectrogram).item()
losses_l1.append(loss_l1)
loss_valid = np.mean(losses)
loss_l1 = np.mean(losses_l1)
self._write_summary_valid(self.step, loss_valid, loss_l1, audio_preds)
def predict(self, spectrogram, target_std, global_cond=None):
with torch.no_grad():
device = next(self.model.parameters()).device
# Change in notation from the PriorGrad paper for fast sampling.
# PriorGrad paper -> Implementation below
# --------------------------------------
# alpha -> talpha
# beta -> training_noise_schedule
# gamma -> alpha
# eta -> beta
training_noise_schedule = np.array(self.params.noise_schedule)
inference_noise_schedule = np.array(self.params.inference_noise_schedule)
talpha = 1 - training_noise_schedule
talpha_cum = np.cumprod(talpha)
beta = inference_noise_schedule
alpha = 1 - beta
alpha_cum = np.cumprod(alpha)
T = []
for s in range(len(inference_noise_schedule)):
for t in range(len(training_noise_schedule) - 1):
if talpha_cum[t + 1] <= alpha_cum[s] <= talpha_cum[t]:
twiddle = (talpha_cum[t] ** 0.5 - alpha_cum[s] ** 0.5) / (
talpha_cum[t] ** 0.5 - talpha_cum[t + 1] ** 0.5)
T.append(t + twiddle)
break
T = np.array(T, dtype=np.float32)
# Expand rank 2 tensors by adding a batch dimension.
if len(spectrogram.shape) == 2:
spectrogram = spectrogram.unsqueeze(0)
spectrogram = spectrogram.to(device)
audio = torch.randn(spectrogram.shape[0], self.params.hop_samples * spectrogram.shape[-1],
device=device) * target_std
noise_scale = torch.from_numpy(alpha_cum ** 0.5).float().unsqueeze(1).to(device)
for n in range(len(alpha) - 1, -1, -1):
c1 = 1 / alpha[n] ** 0.5
c2 = beta[n] / (1 - alpha_cum[n]) ** 0.5
if hasattr(self.model, 'module'):
audio = c1 * (audio - c2 * self.model.module(audio, spectrogram, torch.tensor([T[n]], device=audio.device),
global_cond).squeeze(1))
else:
audio = c1 * (audio - c2 * self.model(audio, spectrogram, torch.tensor([T[n]], device=audio.device),
global_cond).squeeze(1))
if n > 0:
noise = torch.randn_like(audio) * target_std
sigma = ((1.0 - alpha_cum[n - 1]) / (1.0 - alpha_cum[n]) * beta[n]) ** 0.5
audio += sigma * noise
audio = torch.clamp(audio, -1.0, 1.0)
return audio
def _write_summary(self, step, features, loss):
writer = self.summary_writer or SummaryWriter(self.model_dir, purge_step=step)
writer.add_audio('feature/audio', features['audio'][0], step, sample_rate=self.params.sample_rate)
writer.add_image('feature/spectrogram', torch.flip(features['spectrogram'][:1], [1]), step)
writer.add_scalar('train/loss', loss, step)
writer.add_scalar('train/grad_norm', self.grad_norm, step)
writer.flush()
self.summary_writer = writer
def _write_summary_valid(self, step, loss, loss_l1, audio_preds):
writer = self.summary_writer or SummaryWriter(self.model_dir, purge_step=step)
for i in range(len(audio_preds)):
writer.add_audio('valid/audio_pred_{}'.format(i), audio_preds[i], step, sample_rate=self.params.sample_rate)
writer.add_scalar('valid/loss', loss, step)
writer.add_scalar('valid/loss_lsmae', loss_l1, step)
writer.flush()
self.summary_writer = writer
def _train_impl(replica_id, model, dataset, dataset_val, args, params):
torch.backends.cudnn.benchmark = True
opt = torch.optim.Adam(model.parameters(), lr=params.learning_rate)
learner = PriorGradLearner(args.model_dir, model, dataset, dataset_val, opt, params, fp16=args.fp16)
learner.is_master = (replica_id == 0)
learner.restore_from_checkpoint()
learner.train(max_steps=args.max_steps)
def train(args, params):
dataset = dataset_from_path(args.data_root, args.filelist, params)
dataset_val = dataset_from_path_valid(args.data_root, os.path.join(Path(args.filelist).parent, "valid.txt"), params)
model = PriorGrad(params).cuda()
_train_impl(0, model, dataset, dataset_val, args, params)
def train_distributed(replica_id, replica_count, port, args, params):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = str(port)
torch.distributed.init_process_group('nccl', rank=replica_id, world_size=replica_count)
device = torch.device('cuda', replica_id)
torch.cuda.set_device(device)
model = PriorGrad(params).to(device)
model = DistributedDataParallel(model, device_ids=[replica_id])
dataset = dataset_from_path(args.data_root, args.filelist, params, is_distributed=True)
if replica_id == 0:
dataset_val = dataset_from_path_valid(args.data_root, os.path.join(Path(args.filelist).parent, "valid.txt"), params, is_distributed=False)
else:
dataset_val = None
_train_impl(replica_id, model, dataset, dataset_val, args, params)
| 16,812 | 43.478836 | 146 | py |
NeuralSpeech | NeuralSpeech-master/PriorGrad-vocoder/model.py | # Copyright 2022 (c) Microsoft Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Copyright 2020 LMNT, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import sqrt
Linear = nn.Linear
ConvTranspose2d = nn.ConvTranspose2d
def Conv1d(*args, **kwargs):
layer = nn.Conv1d(*args, **kwargs)
nn.init.kaiming_normal_(layer.weight)
return layer
@torch.jit.script
def silu(x):
return x * torch.sigmoid(x)
class DiffusionEmbedding(nn.Module):
def __init__(self, max_steps):
super().__init__()
self.register_buffer('embedding', self._build_embedding(max_steps), persistent=False)
self.projection1 = Linear(128, 512)
self.projection2 = Linear(512, 512)
def forward(self, diffusion_step):
if diffusion_step.dtype in [torch.int32, torch.int64]:
x = self.embedding[diffusion_step]
else:
x = self._lerp_embedding(diffusion_step)
x = self.projection1(x)
x = silu(x)
x = self.projection2(x)
x = silu(x)
return x
def _lerp_embedding(self, t):
low_idx = torch.floor(t).long()
high_idx = torch.ceil(t).long()
low = self.embedding[low_idx]
high = self.embedding[high_idx]
return low + (high - low) * (t - low_idx)
def _build_embedding(self, max_steps):
steps = torch.arange(max_steps).unsqueeze(1) # [T,1]
dims = torch.arange(64).unsqueeze(0) # [1,64]
table = steps * 10.0 ** (dims * 4.0 / 63.0) # [T,64]
table = torch.cat([torch.sin(table), torch.cos(table)], dim=1)
return table
class SpectrogramUpsampler(nn.Module):
def __init__(self, n_mels):
super().__init__()
self.conv1 = ConvTranspose2d(1, 1, [3, 32], stride=[1, 16], padding=[1, 8])
self.conv2 = ConvTranspose2d(1, 1, [3, 32], stride=[1, 16], padding=[1, 8])
def forward(self, x):
x = torch.unsqueeze(x, 1)
x = self.conv1(x)
x = F.leaky_relu(x, 0.4)
x = self.conv2(x)
x = F.leaky_relu(x, 0.4)
x = torch.squeeze(x, 1)
return x
class ResidualBlock(nn.Module):
def __init__(self, n_mels, residual_channels, dilation, n_cond_global=None):
super().__init__()
self.dilated_conv = Conv1d(residual_channels, 2 * residual_channels, 3, padding=dilation, dilation=dilation)
self.diffusion_projection = Linear(512, residual_channels)
self.conditioner_projection = Conv1d(n_mels, 2 * residual_channels, 1)
if n_cond_global is not None:
self.conditioner_projection_global = Conv1d(n_cond_global, 2 * residual_channels, 1)
self.output_projection = Conv1d(residual_channels, 2 * residual_channels, 1)
def forward(self, x, conditioner, diffusion_step, conditioner_global=None):
diffusion_step = self.diffusion_projection(diffusion_step).unsqueeze(-1)
conditioner = self.conditioner_projection(conditioner)
y = x + diffusion_step
y = self.dilated_conv(y) + conditioner
if conditioner_global is not None:
y = y + self.conditioner_projection_global(conditioner_global)
gate, filter = torch.chunk(y, 2, dim=1)
y = torch.sigmoid(gate) * torch.tanh(filter)
y = self.output_projection(y)
residual, skip = torch.chunk(y, 2, dim=1)
return (x + residual) / sqrt(2.0), skip
class PriorGrad(nn.Module):
def __init__(self, params):
super().__init__()
self.params = params
self.use_prior = params.use_prior
self.condition_prior = params.condition_prior
self.condition_prior_global = params.condition_prior_global
assert not (self.condition_prior and self.condition_prior_global),\
"use only one option for conditioning on the prior"
print("use_prior: {}".format(self.use_prior))
self.n_mels = params.n_mels
self.n_cond = None
print("condition_prior: {}".format(self.condition_prior))
if self.condition_prior:
self.n_mels = self.n_mels + 1
print("self.n_mels increased to {}".format(self.n_mels))
print("condition_prior_global: {}".format(self.condition_prior_global))
if self.condition_prior_global:
self.n_cond = 1
self.input_projection = Conv1d(1, params.residual_channels, 1)
self.diffusion_embedding = DiffusionEmbedding(len(params.noise_schedule))
self.spectrogram_upsampler = SpectrogramUpsampler(self.n_mels)
if self.condition_prior_global:
self.global_condition_upsampler = SpectrogramUpsampler(self.n_cond)
self.residual_layers = nn.ModuleList([
ResidualBlock(self.n_mels, params.residual_channels, 2 ** (i % params.dilation_cycle_length),
n_cond_global=self.n_cond)
for i in range(params.residual_layers)
])
self.skip_projection = Conv1d(params.residual_channels, params.residual_channels, 1)
self.output_projection = Conv1d(params.residual_channels, 1, 1)
nn.init.zeros_(self.output_projection.weight)
print('num param: {}'.format(sum(p.numel() for p in self.parameters() if p.requires_grad)))
def forward(self, audio, spectrogram, diffusion_step, global_cond=None):
x = audio.unsqueeze(1)
x = self.input_projection(x)
x = F.relu(x)
diffusion_step = self.diffusion_embedding(diffusion_step)
spectrogram = self.spectrogram_upsampler(spectrogram)
if global_cond is not None:
global_cond = self.global_condition_upsampler(global_cond)
skip = []
for layer in self.residual_layers:
x, skip_connection = layer(x, spectrogram, diffusion_step, global_cond)
skip.append(skip_connection)
x = torch.sum(torch.stack(skip), dim=0) / sqrt(len(self.residual_layers))
x = self.skip_projection(x)
x = F.relu(x)
x = self.output_projection(x)
return x
| 7,288 | 38.4 | 116 | py |
NeuralSpeech | NeuralSpeech-master/PriorGrad-vocoder/dataset.py | # Copyright 2022 (c) Microsoft Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Copyright 2020 LMNT, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import os
import random
import torch
from tqdm import tqdm
from torch.utils.data.distributed import DistributedSampler
from pathlib import Path
from scipy.io.wavfile import read
from preprocess import MAX_WAV_VALUE, get_mel, normalize
device = torch.device("cuda")
def parse_filelist(filelist_path):
with open(filelist_path, 'r') as f:
filelist = [line.strip() for line in f.readlines()]
return filelist
class NumpyDataset(torch.utils.data.Dataset):
def __init__(self, data_root, filelist, params, is_training=True):
super().__init__()
self.data_root = Path(data_root)
self.params = params
self.filenames = []
self.filenames = parse_filelist(filelist)
if not is_training:
self.filenames = sorted(self.filenames)
self.hop_samples = params.hop_samples
self.is_training = is_training
self.use_prior = params.use_prior
self.max_energy_override = params.max_energy_override if hasattr(params, 'max_energy_override') else None
if self.is_training:
self.compute_stats()
if self.use_prior:
# build frame energy data for priorgrad
self.energy_max = float(np.load(str(self.data_root.joinpath('stats_priorgrad', 'energy_max_train.npy')),
allow_pickle=True))
self.energy_min = float(np.load(str(self.data_root.joinpath('stats_priorgrad', 'energy_min_train.npy')),
allow_pickle=True))
print("INFO: loaded frame-level waveform stats : max {} min {}".format(self.energy_max, self.energy_min))
if self.max_energy_override is not None:
print("overriding max energy to {}".format(self.max_energy_override))
self.energy_max = self.max_energy_override
self.std_min = params.std_min
def compute_stats(self):
if os.path.exists(self.data_root.joinpath("stats_priorgrad/energy_max_train.npy")) and \
os.path.exists(self.data_root.joinpath("stats_priorgrad/energy_min_train.npy")):
return
# compute audio stats from the dataset
# goal: pre-calculate variance of the frame-level part of the waveform
# which will be used for the modified Gaussian base distribution for PriorGrad model
energy_list = []
print("INFO: computing training set waveform statistics for PriorGrad training...")
for i in tqdm(range(len(self.filenames))):
sr, audio = read(self.filenames[i])
if self.params.sample_rate != sr:
raise ValueError(f'Invalid sample rate {sr}.')
audio = audio / MAX_WAV_VALUE
audio = normalize(audio) * 0.95
# match audio length to self.hop_size * n for evaluation
if (audio.shape[0] % self.params.hop_samples) != 0:
audio = audio[:-(audio.shape[0] % self.params.hop_samples)]
audio = torch.FloatTensor(audio)
spectrogram = get_mel(audio, self.params)
energy = (spectrogram.exp()).sum(1).sqrt()
energy_list.append(energy.squeeze(0))
energy_list = torch.cat(energy_list)
energy_max = energy_list.max().numpy()
energy_min = energy_list.min().numpy()
self.data_root.joinpath("stats_priorgrad").mkdir(exist_ok=True)
print("INFO: stats computed: max energy {} min energy {}".format(energy_max, energy_min))
np.save(str(self.data_root.joinpath("stats_priorgrad/energy_max_train.npy")), energy_max)
np.save(str(self.data_root.joinpath("stats_priorgrad/energy_min_train.npy")), energy_min)
def __len__(self):
return len(self.filenames)
def __getitem__(self, idx):
audio_filename = self.filenames[idx]
sr, audio = read(audio_filename)
if self.params.sample_rate != sr:
raise ValueError(f'Invalid sample rate {sr}.')
audio = audio / MAX_WAV_VALUE
audio = normalize(audio) * 0.95
# match audio length to self.hop_size * n for evaluation
if (audio.shape[0] % self.params.hop_samples) != 0:
audio = audio[:-(audio.shape[0] % self.params.hop_samples)]
audio = torch.FloatTensor(audio)
if self.is_training:
# get segment of audio
start = random.randint(0, audio.shape[0] - (self.params.crop_mel_frames * self.params.hop_samples))
end = start + (self.params.crop_mel_frames * self.params.hop_samples)
audio = audio[start:end]
spectrogram = get_mel(audio, self.params)
energy = (spectrogram.exp()).sum(1).sqrt()
if self.use_prior:
if self.max_energy_override is not None:
energy = torch.clamp(energy, None, self.max_energy_override)
# normalize to 0~1
target_std = torch.clamp((energy - self.energy_min) / (self.energy_max - self.energy_min), self.std_min, None)
else:
target_std = torch.ones_like(spectrogram[:, 0, :])
return {
'audio': audio, # [T_time]
'spectrogram': spectrogram[0].T, # [T_mel, 80]
'target_std': target_std[0] # [T_mel]
}
class Collator:
def __init__(self, params, is_training=True):
self.params = params
self.is_training = is_training
def collate(self, minibatch):
samples_per_frame = self.params.hop_samples
for record in minibatch:
#Filter out records that aren't long enough.
if len(record['spectrogram']) < self.params.crop_mel_frames:
del record['spectrogram']
del record['audio']
continue
record['spectrogram'] = record['spectrogram'].T
record['target_std'] = record['target_std']
record['target_std'] = torch.repeat_interleave(record['target_std'], samples_per_frame)
record['audio'] = record['audio']
assert record['audio'].shape == record['target_std'].shape
audio = torch.stack([record['audio'] for record in minibatch if 'audio' in record])
spectrogram = torch.stack([record['spectrogram'] for record in minibatch if 'spectrogram' in record])
target_std = torch.stack([record['target_std'] for record in minibatch if 'target_std' in record])
return {
'audio': audio,
'spectrogram': spectrogram,
'target_std': target_std
}
def from_path(data_root, filelist, params, is_distributed=False):
dataset = NumpyDataset(data_root, filelist, params, is_training=True)
return torch.utils.data.DataLoader(
dataset,
batch_size=params.batch_size,
collate_fn=Collator(params, is_training=True).collate,
shuffle=not is_distributed,
num_workers=1,
sampler=DistributedSampler(dataset) if is_distributed else None,
pin_memory=False,
drop_last=True)
def from_path_valid(data_root, filelist, params, is_distributed=False):
dataset = NumpyDataset(data_root, filelist, params, is_training=False)
return torch.utils.data.DataLoader(
dataset,
batch_size=1,
collate_fn=Collator(params, is_training=False).collate,
shuffle=False,
num_workers=1,
sampler=DistributedSampler(dataset) if is_distributed else None,
pin_memory=False,
drop_last=False)
| 8,838 | 42.328431 | 122 | py |
NeuralSpeech | NeuralSpeech-master/PriorGrad-vocoder/preprocess.py | # Copyright 2022 (c) Microsoft Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# This public codebase switched from the original STFT implementation to HiFi-GAN version to provide compatibility with PriorGrad-acoustic
# The following STFT implementation is based on open-source HiFi-GAN https://github.com/jik876/hifi-gan
# Note: the STFT detail is different to the one used in https://github.com/lmnt-com/diffwave and the PriorGrad ICLR22 paper
# MIT License
#
# Copyright (c) 2020 Jungil Kong
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
from librosa.filters import mel as librosa_mel_fn
from librosa.util import normalize
from scipy.io.wavfile import read
MAX_WAV_VALUE = 32768.0
mel_basis = {}
hann_window = {}
def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
return torch.log(torch.clamp(x, min=clip_val) * C)
def spectral_normalize_torch(magnitudes):
output = dynamic_range_compression_torch(magnitudes)
return output
# function to get HiFi-GAN compatible mel-spec with the given audio on-the-fly during training
# which can remove the preprocessing pipeline of the open-source DiffWave (https://github.com/lmnt-com/diffwave)
def get_mel(audio, params, center=False):
n_fft = params.n_fft
num_mels = params.n_mels
sampling_rate = params.sample_rate
hop_size = params.hop_samples
win_size = params.hop_samples * 4
fmin = params.fmin
fmax = params.fmax
y = audio.unsqueeze(0)
if torch.min(y) < -1.:
print('min value is ', torch.min(y))
if torch.max(y) > 1.:
print('max value is ', torch.max(y))
global mel_basis, hann_window
if fmax not in mel_basis:
mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device)
hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)
y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
y = y.squeeze(1)
# complex tensor as default, then use view_as_real for future pytorch compatibility
spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)],
center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=True)
spec = torch.view_as_real(spec)
spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))
spec = torch.matmul(mel_basis[str(fmax)+'_'+str(y.device)], spec)
spec = spectral_normalize_torch(spec)
return spec
# function to get both audio and mel from filepath. Not used for training (uses on-the-fly mel generation instead).
def get_audio_mel(filepath, params, center=False):
n_fft = params.n_fft
num_mels = params.n_mels
sampling_rate = params.sample_rate
hop_size = params.hop_samples
win_size = params.hop_samples * 4
fmin = params.fmin
fmax = params.fmax
sr, audio = read(filepath)
if params.sample_rate != sr:
raise ValueError(f'Invalid sample rate {sr}.')
# match audio length to self.hop_size * n for evaluation
if (audio.shape[0] % hop_size) != 0:
audio = audio[:-(audio.shape[0] % hop_size)]
audio = audio / MAX_WAV_VALUE
audio = normalize(audio) * 0.95
audio = torch.FloatTensor(audio)
y = audio.unsqueeze(0)
if torch.min(y) < -1.:
print('min value is ', torch.min(y))
if torch.max(y) > 1.:
print('max value is ', torch.max(y))
global mel_basis, hann_window
if fmax not in mel_basis:
mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device)
hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)
y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
y = y.squeeze(1)
# complex tensor as default, then use view_as_real for future pytorch compatibility
spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)],
center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=True)
spec = torch.view_as_real(spec)
spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))
spec = torch.matmul(mel_basis[str(fmax)+'_'+str(y.device)], spec)
spec = spectral_normalize_torch(spec)
assert audio.shape[0] == spec.shape[2] * hop_size
return audio.unsqueeze(0), spec | 6,176 | 41.6 | 138 | py |
NeuralSpeech | NeuralSpeech-master/FastCorrect2/eval_aishell_nbest.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import json
import sys
import torch
import argparse
import re
#from fairseq.models.transformer import TransformerModel
import os
import os.path
import time
import json
import numpy as np
#os.environ["CUDA_VISIBLE_DEVICES"] = '1'
from fairseq import utils
utils.import_user_module(argparse.Namespace(user_dir='./FastCorrect'))
from FastCorrect.fastcorrect_model import FastCorrectModel
def remove_ch_spaces(input_str):
return re.sub(r"(?<=[\u4e00-\u9fff])(\s+)(?=[\u4e00-\u9fff])", "", input_str.strip())
try:
model_name_or_path = sys.argv[3]
except:
model_name_or_path = "checkpoints/shared_baseline"
try:
iter_decode_max_iter = int(sys.argv[4])
except:
iter_decode_max_iter = -1
try:
edit_thre = float(sys.argv[5])
except:
edit_thre = 0
try:
nbest_infer_type = sys.argv[6]
except:
nbest_infer_type = "predict"
try:
test_epoch = int(sys.argv[7])
checkpoint_file = "checkpoint{}.pt".format(test_epoch)
except:
test_epoch = 'best'
checkpoint_file = "checkpoint_best.pt"
#checkpoint_file = "checkpoint_best.pt"
print("test {}/{}".format(model_name_or_path, checkpoint_file))
data_name_or_path = # <Path-to-AISHELL1-Training-Binary-Data>
bpe = "sentencepiece"
sentencepiece_model = # <path-to-sentencepiece_model>, you can use arbitrary sentencepiece for our pretrained model since it is a char-level model
commonset_dir = "./eval_data"
res_dir = os.path.join(model_name_or_path, ("results_aishell" if (iter_decode_max_iter == -1) else ("results_aishell_b" + str(iter_decode_max_iter) + '_t' + str(edit_thre) + '_' + nbest_infer_type)).replace('results', 'results_' + str(test_epoch)))
tmp_dir = os.path.join(model_name_or_path, ("tmp_aishell" if (iter_decode_max_iter == -1) else ("tmp_aishell_b" + str(iter_decode_max_iter) + '_t' + str(edit_thre) + '_' + nbest_infer_type)).replace('tmp', 'tmp_' + str(test_epoch)))
os.makedirs(res_dir, exist_ok=True)
os.makedirs(tmp_dir, exist_ok=True)
#fout_ex = open(os.path.join(tmp_dir, "exception.log"), "w")
try:
short_set = sys.argv[1].split(',')
except:
raise ValueError()
print("short_set:", short_set)
transf_gec = FastCorrectModel.from_pretrained(model_name_or_path, checkpoint_file=checkpoint_file, data_name_or_path=data_name_or_path, bpe=bpe, sentencepiece_model=sentencepiece_model)
transf_gec.eval()
transf_gec.cuda()
nbest_num = 4
for input_tsv in [os.path.join(commonset_dir, f, "aligned_nbest_token_raw.data.json") for f in short_set]:
all_time = []
eval_origin_dict = json.load(open(input_tsv, 'r', encoding='utf-8'))
translate_input_dict = {}
for k, v in eval_origin_dict["utts"].items():
translate_input_dict[k] = (v["output"][0]["rec_token"].replace('<eos>', '').strip(), v["output"][0]["token"])
translated_output_dict = {}
for k, v in translate_input_dict.items():
#print(v)
text, gt = v
assert len(text.split(" ||| ")) == nbest_num and len(text.split(" ||| ")[0]) > 0
need_skip = False
if not need_skip:
binarized = [transf_gec.binarize(text.replace(" ||| ", " "))]
batched_hypos, exc_time = transf_gec.generate(binarized, nbest_infer=nbest_num, nbest_infer_type=nbest_infer_type, iter_decode_max_iter=iter_decode_max_iter)
translated = [transf_gec.decode(hypos[0]['tokens']) for hypos in batched_hypos][0]
translated = " ".join(translated)
else:
translated = " ".join(text.split(" ||| ")[0].replace('<void>', '').split())
exc_time = 0.0
all_time.append(exc_time)
eval_origin_dict["utts"][k]["output"][0]["rec_text"] = " ".join("".join(translated.split()).replace('▁', ' ').strip().split())
#translated_char = [i for i in eval_origin_dict["utts"][k]["output"][0]["rec_text"]]
eval_origin_dict["utts"][k]["output"][0]["rec_token"] = translated.replace('<void>', '')
#print(eval_origin_dict["utts"][k]["output"][0]["rec_token"])
os.makedirs(os.path.join(res_dir, input_tsv.split('/')[-2]), exist_ok=True)
with open(os.path.join(res_dir, input_tsv.split('/')[-2], input_tsv.split('/')[-2] + "_time.txt"), 'w') as outfile:
outfile.write("{}\t{}\t{}\n".format(len(all_time), sum(all_time), sum(all_time)/len(all_time)))
json.dump(eval_origin_dict, open(os.path.join(res_dir, input_tsv.split('/')[-2], 'data.json'), 'w', encoding='utf-8'), indent=4, sort_keys=True, ensure_ascii=False)
| 4,507 | 39.981818 | 248 | py |
NeuralSpeech | NeuralSpeech-master/FastCorrect2/FC_utils/language_pair_dataset.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
import torch
from fairseq.data import FairseqDataset, data_utils
import random
import math
logger = logging.getLogger(__name__)
def collate_2d_tokens(
values,
pad_idx,
eos_idx=None,
left_pad=False,
move_eos_to_beginning=False,
pad_to_length=None,
pad_to_multiple=1,
):
"""Convert a list of 1d tensors into a padded 2d tensor."""
hidden_size = values[0].size(1)
size = max(v.size(0) for v in values)
size = size if pad_to_length is None else max(size, pad_to_length)
if pad_to_multiple != 1 and size % pad_to_multiple != 0:
size = int(((size - 0.1) // pad_to_multiple + 1) * pad_to_multiple)
res = values[0].new(len(values), size, hidden_size).fill_(pad_idx)
def copy_tensor(src, dst):
assert dst.numel() == src.numel()
if move_eos_to_beginning:
if eos_idx is None:
# if no eos_idx is specified, then use the last token in src
dst[0] = src[-1]
else:
dst[0] = eos_idx
dst[1:] = src[:-1]
else:
dst.copy_(src)
for i, v in enumerate(values):
copy_tensor(v, res[i][size - len(v) :] if left_pad else res[i][: len(v)])
return res
def collate(
samples,
pad_idx,
eos_idx,
left_pad_source=True,
left_pad_target=False,
input_feeding=True,
pad_to_length=None,
pad_to_multiple=1,
):
if len(samples) == 0:
return {}
def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None):
if len(samples[0][key].shape) == 1:
return data_utils.collate_tokens(
[s[key] for s in samples],
pad_idx,
eos_idx,
left_pad,
move_eos_to_beginning,
pad_to_length=pad_to_length,
pad_to_multiple=pad_to_multiple,
)
elif len(samples[0][key].shape) == 2:
return collate_2d_tokens(
[s[key] for s in samples],
pad_idx,
eos_idx,
left_pad,
move_eos_to_beginning,
pad_to_length=pad_to_length,
pad_to_multiple=pad_to_multiple,
)
else:
raise ValueError("Unsupported condition!")
def check_alignment(alignment, src_len, tgt_len):
if alignment is None or len(alignment) == 0:
return False
if (
alignment[:, 0].max().item() >= src_len - 1
or alignment[:, 1].max().item() >= tgt_len - 1
):
logger.warning("alignment size mismatch found, skipping alignment!")
return False
return True
def compute_alignment_weights(alignments):
"""
Given a tensor of shape [:, 2] containing the source-target indices
corresponding to the alignments, a weight vector containing the
inverse frequency of each target index is computed.
For e.g. if alignments = [[5, 7], [2, 3], [1, 3], [4, 2]], then
a tensor containing [1., 0.5, 0.5, 1] should be returned (since target
index 3 is repeated twice)
"""
align_tgt = alignments[:, 1]
_, align_tgt_i, align_tgt_c = torch.unique(
align_tgt, return_inverse=True, return_counts=True
)
align_weights = align_tgt_c[align_tgt_i[np.arange(len(align_tgt))]]
return 1.0 / align_weights.float()
id = torch.LongTensor([s["id"] for s in samples])
src_tokens = merge(
"source",
left_pad=left_pad_source,
pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
)
# sort by descending source length
if len(samples[0]["source"].shape) == 1:
src_lengths = torch.LongTensor(
[s["source"].ne(pad_idx).long().sum() for s in samples]
)
elif len(samples[0]["source"].shape) == 2:
src_lengths = torch.LongTensor(
[s["source"][:, 0].ne(pad_idx).long().sum() for s in samples]
)
else:
raise ValueError("Unsupported condition!")
src_lengths, sort_order = src_lengths.sort(descending=True)
id = id.index_select(0, sort_order)
src_tokens = src_tokens.index_select(0, sort_order)
prev_output_tokens = None
target = None
if samples[0].get("wer_dur", None) is not None:
wer_dur = merge(
"wer_dur",
left_pad=left_pad_source,
pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
)
wer_dur = wer_dur.index_select(0, sort_order)
to_be_edited = merge(
"to_be_edited",
left_pad=left_pad_source,
pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
)
to_be_edited = to_be_edited.index_select(0, sort_order)
if samples[0].get("closest_label", None) is not None:
closest_label = merge(
"closest_label",
left_pad=left_pad_source,
pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
)
closest_label = closest_label.index_select(0, sort_order)
else:
closest_label = None
if samples[0].get("target", None) is not None:
target = merge(
"target",
left_pad=left_pad_target,
pad_to_length=pad_to_length["target"]
if pad_to_length is not None
else None,
)
target = target.index_select(0, sort_order)
if samples[0].get("wer_dur", None) is not None:
for_wer_gather = merge(
"for_wer_gather",
left_pad=left_pad_target,
pad_to_length=pad_to_length["target"]
if pad_to_length is not None
else None,
)
for_wer_gather = for_wer_gather.index_select(0, sort_order)
tgt_lengths = torch.LongTensor(
[s["target"].ne(pad_idx).long().sum() for s in samples]
).index_select(0, sort_order)
ntokens = tgt_lengths.sum().item()
if samples[0].get("prev_output_tokens", None) is not None:
prev_output_tokens = merge("prev_output_tokens", left_pad=left_pad_target)
elif input_feeding:
# we create a shifted version of targets for feeding the
# previous output token(s) into the next decoder step
prev_output_tokens = merge(
"target",
left_pad=left_pad_target,
move_eos_to_beginning=True,
pad_to_length=pad_to_length["target"]
if pad_to_length is not None
else None,
)
else:
ntokens = src_lengths.sum().item()
if samples[0].get("wer_dur", None) is not None:
batch = {
"id": id,
"nsentences": len(samples),
"ntokens": ntokens,
"net_input": {
"src_tokens": src_tokens,
"src_lengths": src_lengths,
"wer_dur": wer_dur,
"to_be_edited": to_be_edited,
"for_wer_gather": for_wer_gather,
"closest_label": closest_label,
},
"target": target,
}
else:
batch = {
"id": id,
"nsentences": len(samples),
"ntokens": ntokens,
"net_input": {
"src_tokens": src_tokens,
"src_lengths": src_lengths,
},
"target": target,
}
if prev_output_tokens is not None:
batch["net_input"]["prev_output_tokens"] = prev_output_tokens.index_select(
0, sort_order
)
if samples[0].get("alignment", None) is not None:
bsz, tgt_sz = batch["target"].shape
src_sz = batch["net_input"]["src_tokens"].shape[1]
offsets = torch.zeros((len(sort_order), 2), dtype=torch.long)
offsets[:, 1] += torch.arange(len(sort_order), dtype=torch.long) * tgt_sz
if left_pad_source:
offsets[:, 0] += src_sz - src_lengths
if left_pad_target:
offsets[:, 1] += tgt_sz - tgt_lengths
alignments = [
alignment + offset
for align_idx, offset, src_len, tgt_len in zip(
sort_order, offsets, src_lengths, tgt_lengths
)
for alignment in [samples[align_idx]["alignment"].view(-1, 2)]
if check_alignment(alignment, src_len, tgt_len)
]
if len(alignments) > 0:
alignments = torch.cat(alignments, dim=0)
align_weights = compute_alignment_weights(alignments)
batch["alignments"] = alignments
batch["align_weights"] = align_weights
if samples[0].get("constraints", None) is not None:
# Collate the packed constraints across the samples, padding to
# the length of the longest sample.
lens = [sample.get("constraints").size(0) for sample in samples]
max_len = max(lens)
constraints = torch.zeros((len(samples), max(lens))).long()
for i, sample in enumerate(samples):
constraints[i, 0 : lens[i]] = samples[i].get("constraints")
batch["constraints"] = constraints
return batch
class LanguagePairDataset(FairseqDataset):
"""
A pair of torch.utils.data.Datasets.
Args:
src (torch.utils.data.Dataset): source dataset to wrap
src_sizes (List[int]): source sentence lengths
src_dict (~fairseq.data.Dictionary): source vocabulary
tgt (torch.utils.data.Dataset, optional): target dataset to wrap
tgt_sizes (List[int], optional): target sentence lengths
tgt_dict (~fairseq.data.Dictionary, optional): target vocabulary
left_pad_source (bool, optional): pad source tensors on the left side
(default: True).
left_pad_target (bool, optional): pad target tensors on the left side
(default: False).
shuffle (bool, optional): shuffle dataset elements before batching
(default: True).
input_feeding (bool, optional): create a shifted version of the targets
to be passed into the model for teacher forcing (default: True).
remove_eos_from_source (bool, optional): if set, removes eos from end
of source if it's present (default: False).
append_eos_to_target (bool, optional): if set, appends eos to end of
target if it's absent (default: False).
align_dataset (torch.utils.data.Dataset, optional): dataset
containing alignments.
constraints (Tensor, optional): 2d tensor with a concatenated, zero-
delimited list of constraints for each sentence.
append_bos (bool, optional): if set, appends bos to the beginning of
source/target sentence.
num_buckets (int, optional): if set to a value greater than 0, then
batches will be bucketed into the given number of batch shapes.
src_lang_id (int, optional): source language ID, if set, the collated batch
will contain a field 'src_lang_id' in 'net_input' which indicates the
source language of the samples.
tgt_lang_id (int, optional): target language ID, if set, the collated batch
will contain a field 'tgt_lang_id' which indicates the target language
of the samples.
"""
def __init__(
self,
src,
src_sizes,
src_dict,
tgt=None,
tgt_sizes=None,
tgt_dict=None,
left_pad_source=True,
left_pad_target=False,
shuffle=True,
input_feeding=True,
remove_eos_from_source=False,
append_eos_to_target=False,
align_dataset=None,
constraints=None,
append_bos=False,
eos=None,
num_buckets=0,
src_lang_id=None,
tgt_lang_id=None,
pad_to_multiple=1,
cal_wer_dur=False,
src_with_werdur=False,
src_with_nbest_werdur=0,
bos_prepended_outside=False,
merge_nbest_werdur='',
break_alignment=False,
copy_beam1=False,
to_be_edited_mask = "",
nbest_infer=0,
):
if tgt_dict is not None:
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
if tgt is not None:
assert len(src) == len(
tgt
), "Source and target must contain the same number of examples"
self.src = src
self.tgt = tgt
self.src_sizes = np.array(src_sizes)
self.tgt_sizes = np.array(tgt_sizes) if tgt_sizes is not None else None
self.sizes = (
np.vstack((self.src_sizes, self.tgt_sizes)).T
if self.tgt_sizes is not None
else self.src_sizes
)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
self.left_pad_source = left_pad_source
self.left_pad_target = left_pad_target
self.shuffle = shuffle
self.input_feeding = input_feeding
self.remove_eos_from_source = remove_eos_from_source
self.append_eos_to_target = append_eos_to_target
self.align_dataset = align_dataset
if self.align_dataset is not None:
assert (
self.tgt_sizes is not None
), "Both source and target needed when alignments are provided"
self.constraints = constraints
self.append_bos = append_bos
self.eos = eos if eos is not None else src_dict.eos()
self.src_lang_id = src_lang_id
self.tgt_lang_id = tgt_lang_id
if num_buckets > 0:
from fairseq.data import BucketPadLengthDataset
self.src = BucketPadLengthDataset(
self.src,
sizes=self.src_sizes,
num_buckets=num_buckets,
pad_idx=self.src_dict.pad(),
left_pad=self.left_pad_source,
)
self.src_sizes = self.src.sizes
logger.info("bucketing source lengths: {}".format(list(self.src.buckets)))
if self.tgt is not None:
self.tgt = BucketPadLengthDataset(
self.tgt,
sizes=self.tgt_sizes,
num_buckets=num_buckets,
pad_idx=self.tgt_dict.pad(),
left_pad=self.left_pad_target,
)
self.tgt_sizes = self.tgt.sizes
logger.info(
"bucketing target lengths: {}".format(list(self.tgt.buckets))
)
# determine bucket sizes using self.num_tokens, which will return
# the padded lengths (thanks to BucketPadLengthDataset)
num_tokens = np.vectorize(self.num_tokens, otypes=[np.long])
self.bucketed_num_tokens = num_tokens(np.arange(len(self.src)))
self.buckets = [
(None, num_tokens) for num_tokens in np.unique(self.bucketed_num_tokens)
]
else:
self.buckets = None
self.pad_to_multiple = pad_to_multiple
self.cal_wer_dur = cal_wer_dur
self.src_with_werdur = src_with_werdur
self.src_with_nbest_werdur = src_with_nbest_werdur
self.nbest_infer = nbest_infer
self.merge_nbest_werdur = merge_nbest_werdur
self.break_alignment = break_alignment
self.copy_beam1 = copy_beam1
if self.copy_beam1:
assert self.src_with_nbest_werdur or nbest_infer
if nbest_infer:
assert not self.src_with_nbest_werdur
assert not self.src_with_werdur
assert not self.cal_wer_dur
self.bos_prepended_outside = bos_prepended_outside
self.to_be_edited_mask = to_be_edited_mask
if self.cal_wer_dur:
assert not self.src_with_werdur
assert not self.src_with_nbest_werdur
assert not nbest_infer
if self.src_with_werdur:
assert not self.cal_wer_dur
assert not self.src_with_nbest_werdur
assert not nbest_infer
if self.src_with_nbest_werdur:
assert not self.src_with_werdur
assert not self.cal_wer_dur
assert not nbest_infer
if self.merge_nbest_werdur:
assert not self.src_with_werdur
assert not self.cal_wer_dur
def get_batch_shapes(self):
return self.buckets
def calculate_wer_dur(self, hypo_list, ref_list):
len_hyp = len(hypo_list)
len_ref = len(ref_list)
cost_matrix = np.zeros((len_hyp + 1, len_ref + 1), dtype=np.int16)
# 0-equal;2-insertion;3-deletion;1-substitution
ops_matrix = np.zeros((len_hyp + 1, len_ref + 1), dtype=np.int8)
for i in range(len_hyp + 1):
cost_matrix[i][0] = i
for j in range(len_ref + 1):
cost_matrix[0][j] = j
id_ind = 0
for i in range(1, len_hyp + 1):
for j in range(1, len_ref + 1):
ideal_index = i * len_ref / len_hyp
if hypo_list[i-1] == ref_list[j-1]:
cost_matrix[i][j] = cost_matrix[i-1][j-1]
else:
substitution = cost_matrix[i-1][j-1] + 1
insertion = cost_matrix[i-1][j] + 1
deletion = cost_matrix[i][j-1] + 1
compare_val = [substitution, insertion, deletion] # 优先级
if (substitution > insertion) and (insertion == deletion) :
min_val = insertion
if ideal_index >= j:
operation_idx = 2
else:
operation_idx = 3
else:
min_val = min(compare_val)
operation_idx = compare_val.index(min_val) + 1
cost_matrix[i][j] = min_val
ops_matrix[i][j] = operation_idx
i = len_hyp
j = len_ref
# nb_map = {"N": len_ref, "C": 0, "W": 0, "I": 0, "D": 0, "S": 0}
char_map = []
current_chars = []
res_chars = []
while i >= 0 or j >= 0:
i_idx = max(0, i)
j_idx = max(0, j)
if ops_matrix[i_idx][j_idx] == 0: # correct
if i-1 >= 0 and j-1 >= 0:
# match_idx.append((j-1, i-1))
# nb_map['C'] += 1
current_chars.append(ref_list[j-1])
char_map.append([hypo_list[i-1], current_chars])
current_chars = []
i -= 1
j -= 1
# elif ops_matrix[i_idx][j_idx] == 1: # insert
elif ops_matrix[i_idx][j_idx] == 2: # insert
char_map.append([hypo_list[i-1], current_chars])
current_chars = []
i -= 1
# nb_map['I'] += 1
# elif ops_matrix[i_idx][j_idx] == 2: # delete
elif ops_matrix[i_idx][j_idx] == 3: # delete
current_chars.append(ref_list[j-1])
j -= 1
# nb_map['D'] += 1
# elif ops_matrix[i_idx][j_idx] == 3: # substitute
elif ops_matrix[i_idx][j_idx] == 1: # substitute
current_chars.append(ref_list[j-1])
char_map.append([hypo_list[i-1], current_chars])
current_chars = []
i -= 1
j -= 1
# nb_map['S'] += 1
else:
raise ValueError("Impossible condition!")
if i < 0 and j >= 0:
# nb_map['D'] += 1
res_chars.append(ref_list[j])
elif j < 0 and i >= 0:
char_map.append([hypo_list[i], current_chars])
current_chars = []
# nb_map['I'] += 1
# else:
# raise ValueError("Impossible condition!")
if res_chars:
char_map[-1][-1].extend(res_chars)
char_map.reverse()
for i in range(len(char_map)):
char_map[i][-1].reverse()
# match_idx.reverse()
# wrong_cnt = cost_matrix[len_hyp][len_ref]
# nb_map["W"] = wrong_cnt
# print("ref: %s" % " ".join(ref_list))
# print("hyp: %s" % " ".join(hypo_list))
# print(nb_map)
# print("match_idx: %s" % str(match_idx))
result_map = [len(i[1]) for i in char_map]
to_be_modify = [int( (len(i[1]) == 1 and i[1][0] == i[0]) ) for i in char_map]
# to_be_modify = []
# for i in char_map:
# if (len(char_map[i][1]) = 1 and char_map[i][1][0] == char_map[i][0])
# to_be_modify.append(0)
# else:
# for j in range(len(char_map[i][1])):
# to_be_modify.append(1)
#if len(to_be_modify) >= 180:
# print(char_map)
# print(to_be_modify)
assert sum(result_map) == len_ref
assert len(result_map) == len_hyp
for_wer_gather = []
for i in range(len(result_map)):
for j in range(result_map[i]):
for_wer_gather.append(i)
# return wrong_cnt, match_idx, nb_map, char_map
return result_map, to_be_modify, for_wer_gather
def break_beam_alignment(self, src_item, werdur_info):
#print(werdur_info)
# werdur_info_mean = torch.abs(werdur_info).float().mean(-1)
# werdur_info_label = ((werdur_info.float().mean(-1) == 1.0).float() * 2 - 1).long()
new_werdur_info = [[] for _ in range(self.src_with_nbest_werdur)]
new_src_item = [[] for _ in range(self.src_with_nbest_werdur)]
void_token = self.tgt_dict.indices['<void>'] if self.tgt_dict else self.src_dict.indices['<void>']
max_length = src_item.shape[0]
for i in range(max_length):
for j in range(self.src_with_nbest_werdur):
if src_item[i][j] != void_token:
new_src_item[j].append(src_item[i][j])
new_werdur_info[j].append(werdur_info[i][j])
else:
assert werdur_info[i][j] == 0
final_length = max(len(i) for i in new_src_item)
for i in range(self.src_with_nbest_werdur):
for j in range(final_length - len(new_src_item[i])):
new_src_item[i].append(void_token)
new_werdur_info[i].append(0)
return torch.LongTensor(new_src_item).transpose(0, 1), torch.LongTensor(new_werdur_info).transpose(0, 1)
def break_beam_alignment_infer(self, src_item):
#print(werdur_info)
# werdur_info_mean = torch.abs(werdur_info).float().mean(-1)
# werdur_info_label = ((werdur_info.float().mean(-1) == 1.0).float() * 2 - 1).long()
new_src_item = [[] for _ in range(self.nbest_infer)]
void_token = self.tgt_dict.indices['<void>'] if self.tgt_dict else self.src_dict.indices['<void>']
max_length = src_item.shape[0]
for i in range(max_length):
for j in range(self.nbest_infer):
if src_item[i][j] != void_token:
new_src_item[j].append(src_item[i][j])
final_length = max(len(i) for i in new_src_item)
for i in range(self.nbest_infer):
for j in range(final_length - len(new_src_item[i])):
new_src_item[i].append(void_token)
return torch.LongTensor(new_src_item).transpose(0, 1)
def __getitem__(self, index):
tgt_item = self.tgt[index] if self.tgt is not None else None
src_item = self.src[index]
# Append EOS to end of tgt sentence if it does not have an EOS and remove
# EOS from end of src sentence if it exists. This is useful when we use
# use existing datasets for opposite directions i.e., when we want to
# use tgt_dataset as src_dataset and vice versa
if self.append_eos_to_target:
eos = self.tgt_dict.eos() if self.tgt_dict else self.src_dict.eos()
if self.tgt and self.tgt[index][-1] != eos:
tgt_item = torch.cat([self.tgt[index], torch.LongTensor([eos])])
if self.append_bos:
bos = self.tgt_dict.bos() if self.tgt_dict else self.src_dict.bos()
if self.tgt and self.tgt[index][0] != bos:
tgt_item = torch.cat([torch.LongTensor([bos]), self.tgt[index]])
bos = self.src_dict.bos()
if self.src[index][0] != bos:
src_item = torch.cat([torch.LongTensor([bos]), self.src[index]])
if self.remove_eos_from_source:
eos = self.src_dict.eos()
if self.src[index][-1] == eos:
src_item = self.src[index][:-1]
if self.src_with_werdur:
assert not self.src_with_nbest_werdur
# assert not
src_item_length = int(len(src_item))
#print(src_item_length, src_item)
if self.append_bos or self.bos_prepended_outside: # origin 8, append_bos: 9
assert src_item_length % 2 == 1
werdur_info = src_item[(src_item_length+1)//2:].clone() - 32768
werdur_info = torch.cat([torch.LongTensor([1]), werdur_info], dim=-1)
src_item = src_item[:(src_item_length+1)//2]
else:
assert src_item_length % 2 == 0
werdur_info = src_item[(src_item_length)//2:].clone() - 32768
# werdur_info = torch.cat([torch.LongTensor([1]), werdur_info], dim=-1)
src_item = src_item[:(src_item_length)//2]
to_be_edited = werdur_info.clamp(0, 1)
wer_dur = torch.abs(werdur_info)
for_wer_gather_list = []
to_be_edited_list = [1 for _ in range(len(wer_dur))]
for i in range(len(wer_dur)):
if self.to_be_edited_mask == 'v1':
if int(to_be_edited[i]) == 0:
to_be_masked = self.random_mask(i, len(wer_dur))
for j in to_be_masked:
to_be_edited_list[j] = 0
elif self.to_be_edited_mask == 'v2':
if int(to_be_edited[i]) == 0:
if int(wer_dur[i]) == 1:
to_be_masked = self.random_mask(i, len(wer_dur))
for j in to_be_masked:
to_be_edited_list[j] = 0
else:
to_be_edited_list[i] = 0
for j in range(abs(int(wer_dur[i]))):
for_wer_gather_list.append(i)
assert to_be_edited_list[0] == 1
assert to_be_edited_list[-1] == 1
for_wer_gather = torch.LongTensor(for_wer_gather_list)
if self.to_be_edited_mask == 'v1' or self.to_be_edited_mask == 'v2':
to_be_edited = torch.LongTensor(to_be_edited_list)
try:
assert len(wer_dur) == len(src_item)
assert len(tgt_item) == len(for_wer_gather)
except:
print("src string:")
print(self.src_dict.string(src_item))
print("tgt string:")
print(self.tgt_dict.string(tgt_item))
print(src_item, tgt_item); print(wer_dur, to_be_edited, for_wer_gather)
raise ValueError()
example = {
"id": index,
"source": src_item,
"target": tgt_item,
"wer_dur": wer_dur,
"to_be_edited": to_be_edited,
"for_wer_gather": for_wer_gather,
}
elif self.src_with_nbest_werdur:
# assert not
src_item_length = int(len(src_item))
bos = self.tgt_dict.bos() if self.tgt_dict else self.src_dict.bos()
eos = self.tgt_dict.eos() if self.tgt_dict else self.src_dict.eos()
# print(src_item_length, src_item)
if self.append_bos or self.bos_prepended_outside: # origin 8, append_bos: 9
assert (src_item_length - self.src_with_nbest_werdur) % 2 == 1
#print(src_item)
werdur_info = src_item[(src_item_length - self.src_with_nbest_werdur + 1) // 2:][:-1].clone() - 32768 #remove EOS
# werdur_info = torch.cat([torch.LongTensor([1]), werdur_info], dim=-1)
src_item = src_item[1:(src_item_length - self.src_with_nbest_werdur + 1) // 2][:-1] #remove EOS
#print(src_item, werdur_info)
assert len(werdur_info) % self.src_with_nbest_werdur == 0
assert len(src_item) % self.src_with_nbest_werdur == 0
assert len(werdur_info) / self.src_with_nbest_werdur - len(src_item) / self.src_with_nbest_werdur == 1.0
werdur_info = torch.reshape(werdur_info, [self.src_with_nbest_werdur, int(len(werdur_info) / self.src_with_nbest_werdur)]).transpose(0,1)
src_item = torch.reshape(src_item, [self.src_with_nbest_werdur, int(len(src_item) / self.src_with_nbest_werdur)]).transpose(0,1)
if self.copy_beam1:
werdur_info = werdur_info[:, 0][:, None].repeat([1, self.src_with_nbest_werdur])
src_item = src_item[:, 0][:, None].repeat([1, self.src_with_nbest_werdur])
closest_label = werdur_info[-1, :].clone()
werdur_info = werdur_info[:-1, :]
assert src_item.shape == werdur_info.shape
if self.merge_nbest_werdur == '':
werdur_info = torch.cat([torch.LongTensor([[1 for iter_i in range(self.src_with_nbest_werdur)]]), werdur_info, torch.LongTensor([[1 for iter_i in range(self.src_with_nbest_werdur)]])], dim=0)
src_item = torch.cat([torch.LongTensor([[bos for iter_i in range(self.src_with_nbest_werdur)]]), src_item, torch.LongTensor([[eos for iter_i in range(self.src_with_nbest_werdur)]])], dim=0)
else:
raise ValueError("Bad merge_nbest_werdur!" + self.merge_nbest_werdur)
werdur_info = torch.cat(
[torch.LongTensor([1]), werdur_info,
torch.LongTensor([1])], dim=0)
src_item = torch.cat(
[torch.LongTensor([[bos for iter_i in range(self.src_with_nbest_werdur)]]), src_item,
torch.LongTensor([[eos for iter_i in range(self.src_with_nbest_werdur)]])], dim=0)
else:
raise ValueError("many logit not update(merge_nbest_werdur)")
assert (src_item_length - self.src_with_nbest_werdur) % 2 == 0
werdur_info = src_item[(src_item_length - self.src_with_nbest_werdur) // 2:][:-1].clone() - 32768 #remove EOS
# werdur_info = torch.cat([torch.LongTensor([1]), werdur_info], dim=-1)
src_item = src_item[:(src_item_length - self.src_with_nbest_werdur) // 2][:-1] #remove EOS
assert len(werdur_info) % self.src_with_nbest_werdur == 0
assert len(src_item) % self.src_with_nbest_werdur == 0
assert len(werdur_info) / self.src_with_nbest_werdur - len(src_item) / self.src_with_nbest_werdur == 1.0
werdur_info = torch.reshape(werdur_info,
[len(werdur_info) / self.src_with_nbest_werdur, self.src_with_nbest_werdur]).transpose(0,1)
src_item = torch.reshape(src_item,
[len(src_item) / self.src_with_nbest_werdur, self.src_with_nbest_werdur]).transpose(0,1)
closest_label = werdur_info[-1, :].clone()
werdur_info = werdur_info[:-1, :]
assert closest_label.shape == werdur_info.shape
werdur_info = torch.cat(
[werdur_info, torch.LongTensor([[1 for iter_i in range(self.src_with_nbest_werdur)]])], dim=0)
src_item = torch.cat(
[src_item, torch.LongTensor([[eos for iter_i in range(self.src_with_nbest_werdur)]])], dim=0)
if self.break_alignment:
#print(src_item)
#print(werdur_info)
src_item, werdur_info = self.break_beam_alignment(src_item, werdur_info)
#print(src_item)
#print(werdur_info)
to_be_edited = werdur_info.clamp(0, 1)
wer_dur = torch.abs(werdur_info)
if not self.merge_nbest_werdur:
for_wer_gather_list = []
# to_be_edited_list = [1 for _ in range(len(wer_dur))]
for k in range(self.src_with_nbest_werdur):
add_to_wer_gather_list = []
for i in range(len(wer_dur)):
# if self.to_be_edited_mask == 'v1':
# if int(to_be_edited[i]) == 0:
# to_be_masked = self.random_mask(i, len(wer_dur))
# for j in to_be_masked:
# to_be_edited_list[j] = 0
# elif self.to_be_edited_mask == 'v2':
# if int(to_be_edited[i]) == 0:
# if int(wer_dur[i]) == 1:
# to_be_masked = self.random_mask(i, len(wer_dur))
# for j in to_be_masked:
# to_be_edited_list[j] = 0
# else:
# to_be_edited_list[i] = 0
for j in range(abs(int(wer_dur[i][k]))):
add_to_wer_gather_list.append(i)
for_wer_gather_list.append(add_to_wer_gather_list)
# assert to_be_edited_list[0] == 1
# assert to_be_edited_list[-1] == 1
'''
print(wer_dur)
print(to_be_edited)
print(src_item)
print(src_item.shape, tgt_item.shape)
print(for_wer_gather_list)
'''
for_wer_gather = torch.LongTensor(for_wer_gather_list).transpose(0, 1)
else:
for_wer_gather_list = []
for i in range(len(wer_dur)):
for j in range(abs(int(wer_dur[i]))):
for_wer_gather_list.append(i)
for_wer_gather = torch.LongTensor(for_wer_gather_list)
# if self.to_be_edited_mask == 'v1' or self.to_be_edited_mask == 'v2':
# to_be_edited = torch.LongTensor(to_be_edited_list)
try:
#if self.src_with_nbest_werdur:
# assert 2 == 3
assert len(wer_dur) == len(src_item)
assert len(tgt_item) == len(for_wer_gather)
except:
print("src string:")
print(self.src_dict.string(src_item))
print("tgt string:")
print(self.tgt_dict.string(tgt_item))
print(src_item, tgt_item)
if self.src_with_nbest_werdur:
print("wer_dur:", wer_dur)
print("to_be_edited", to_be_edited)
print("for_wer_gather", for_wer_gather)
else:
print(wer_dur, to_be_edited, for_wer_gather)
raise ValueError()
example = {
"id": index,
"source": src_item,
"target": tgt_item,
"wer_dur": wer_dur,
"to_be_edited": to_be_edited,
"for_wer_gather": for_wer_gather,
"closest_label": closest_label,
}
elif self.nbest_infer:
# assert not
src_item_length = int(len(src_item))
bos = self.tgt_dict.bos() if self.tgt_dict else self.src_dict.bos()
eos = self.tgt_dict.eos() if self.tgt_dict else self.src_dict.eos()
# print(src_item_length, src_item)
if self.append_bos or self.bos_prepended_outside: # origin 8, append_bos: 9
# assert (src_item_length - self.src_with_nbest_werdur) % 2 == 1
# werdur_info = src_item[(src_item_length - self.src_with_nbest_werdur + 1) // 2:][:-1].clone() - 32768 #remove EOS
# werdur_info = torch.cat([torch.LongTensor([1]), werdur_info], dim=-1)
src_item = src_item[1:-1] #remove EOS
#print(src_item, werdur_info)
# assert len(werdur_info) % self.src_with_nbest_werdur == 0
assert len(src_item) % self.nbest_infer == 0
# assert len(werdur_info) / self.src_with_nbest_werdur - len(src_item) / self.src_with_nbest_werdur == 1.0
# werdur_info = torch.reshape(werdur_info, [self.src_with_nbest_werdur, int(len(werdur_info) / self.src_with_nbest_werdur)]).transpose(0,1)
src_item = torch.reshape(src_item, [self.nbest_infer, int(len(src_item) / self.nbest_infer)]).transpose(0,1)
if self.copy_beam1:
src_item = src_item[:, 0][:, None].repeat([1, self.nbest_infer])
# closest_label = werdur_info[-1, :].clone()
# werdur_info = werdur_info[:-1, :]
# assert src_item.shape == werdur_info.shape
# werdur_info = torch.cat([torch.LongTensor([[1 for iter_i in range(self.src_with_nbest_werdur)]]), werdur_info, torch.LongTensor([[1 for iter_i in range(self.src_with_nbest_werdur)]])], dim=0)
src_item = torch.cat([torch.LongTensor([[bos for iter_i in range(self.nbest_infer)]]), src_item, torch.LongTensor([[eos for iter_i in range(self.nbest_infer)]])], dim=0)
else:
# assert (src_item_length - self.src_with_nbest_werdur) % 2 == 0
# werdur_info = src_item[(src_item_length - self.src_with_nbest_werdur) // 2:][:-1].clone() - 32768 #remove EOS
# werdur_info = torch.cat([torch.LongTensor([1]), werdur_info], dim=-1)
src_item = src_item[:-1] #remove EOS
# assert len(werdur_info) % self.src_with_nbest_werdur == 0
assert len(src_item) % self.nbest_infer == 0
# assert len(werdur_info) / self.src_with_nbest_werdur - len(src_item) / self.src_with_nbest_werdur == 1.0
# werdur_info = torch.reshape(werdur_info,
# [len(werdur_info) / self.src_with_nbest_werdur, self.src_with_nbest_werdur])
src_item = torch.reshape(src_item,
[int(len(src_item) / self.nbest_infer), self.nbest_infer]).transpose(0,1)
if self.copy_beam1:
src_item = src_item[:, 0][:, None].repeat([1, self.nbest_infer])
# closest_label = werdur_info[-1, :].clone()
# werdur_info = werdur_info[:-1, :]
# assert closest_label.shape == werdur_info.shape
#
# werdur_info = torch.cat(
# [werdur_info, torch.LongTensor([[1 for iter_i in range(self.src_with_nbest_werdur)]])], dim=0)
src_item = torch.cat(
[src_item, torch.LongTensor([[eos for iter_i in range(self.nbest_infer)]])], dim=0)
if self.break_alignment:
src_item = self.break_beam_alignment_infer(src_item)
# to_be_edited = werdur_info.clamp(0, 1)
# wer_dur = torch.abs(werdur_info)
# for_wer_gather_list = []
# to_be_edited_list = [1 for _ in range(len(wer_dur))]
# for k in range(self.src_with_nbest_werdur):
# add_to_wer_gather_list = []
# for i in range(len(wer_dur)):
# # if self.to_be_edited_mask == 'v1':
# # if int(to_be_edited[i]) == 0:
# # to_be_masked = self.random_mask(i, len(wer_dur))
# # for j in to_be_masked:
# # to_be_edited_list[j] = 0
# # elif self.to_be_edited_mask == 'v2':
# # if int(to_be_edited[i]) == 0:
# # if int(wer_dur[i]) == 1:
# # to_be_masked = self.random_mask(i, len(wer_dur))
# # for j in to_be_masked:
# # to_be_edited_list[j] = 0
# # else:
# # to_be_edited_list[i] = 0
# for j in range(abs(int(wer_dur[i][k]))):
# add_to_wer_gather_list.append(i)
# for_wer_gather_list.append(add_to_wer_gather_list)
# assert to_be_edited_list[0] == 1
# assert to_be_edited_list[-1] == 1
'''
print(wer_dur)
print(to_be_edited)
print(src_item)
print(src_item.shape, tgt_item.shape)
print(for_wer_gather_list)
'''
# for_wer_gather = torch.LongTensor(for_wer_gather_list).transpose(0, 1)
# if self.to_be_edited_mask == 'v1' or self.to_be_edited_mask == 'v2':
# to_be_edited = torch.LongTensor(to_be_edited_list)
# try:
# #if self.src_with_nbest_werdur:
# # assert 2 == 3
# assert len(wer_dur) == len(src_item)
# assert len(tgt_item) == len(for_wer_gather)
# except:
# print("src string:")
# print(self.src_dict.string(src_item))
# print("tgt string:")
# print(self.tgt_dict.string(tgt_item))
# print(src_item, tgt_item)
# if self.src_with_nbest_werdur:
# print("wer_dur:", wer_dur)
# print("to_be_edited", to_be_edited)
# print("for_wer_gather", for_wer_gather)
# else:
# print(wer_dur, to_be_edited, for_wer_gather)
# raise ValueError()
example = {
"id": index,
"source": src_item,
"target": tgt_item,
# "wer_dur": wer_dur,
# "to_be_edited": to_be_edited,
# "for_wer_gather": for_wer_gather,
# "closest_label": closest_label,
}
else:
example = {
"id": index,
"source": src_item,
"target": tgt_item,
}
if self.cal_wer_dur:
# assert not self.src_with_werdur
wer_dur, to_be_edited, for_wer_gather = self.calculate_wer_dur(src_item, tgt_item)
example["wer_dur"] = torch.LongTensor(wer_dur)
example["to_be_edited"] = torch.LongTensor(to_be_edited)
example["for_wer_gather"] = torch.LongTensor(for_wer_gather)
if self.align_dataset is not None:
example["alignment"] = self.align_dataset[index]
if self.constraints is not None:
example["constraints"] = self.constraints[index]
return example
def __len__(self):
return len(self.src)
def collater(self, samples, pad_to_length=None):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
pad_to_length (dict, optional): a dictionary of
{'source': source_pad_to_length, 'target': target_pad_to_length}
to indicate the max length to pad to in source and target respectively.
Returns:
dict: a mini-batch with the following keys:
- `id` (LongTensor): example IDs in the original input order
- `ntokens` (int): total number of tokens in the batch
- `net_input` (dict): the input to the Model, containing keys:
- `src_tokens` (LongTensor): a padded 2D Tensor of tokens in
the source sentence of shape `(bsz, src_len)`. Padding will
appear on the left if *left_pad_source* is ``True``.
- `src_lengths` (LongTensor): 1D Tensor of the unpadded
lengths of each source sentence of shape `(bsz)`
- `prev_output_tokens` (LongTensor): a padded 2D Tensor of
tokens in the target sentence, shifted right by one
position for teacher forcing, of shape `(bsz, tgt_len)`.
This key will not be present if *input_feeding* is
``False``. Padding will appear on the left if
*left_pad_target* is ``True``.
- `src_lang_id` (LongTensor): a long Tensor which contains source
language IDs of each sample in the batch
- `target` (LongTensor): a padded 2D Tensor of tokens in the
target sentence of shape `(bsz, tgt_len)`. Padding will appear
on the left if *left_pad_target* is ``True``.
- `tgt_lang_id` (LongTensor): a long Tensor which contains target language
IDs of each sample in the batch
"""
res = collate(
samples,
pad_idx=self.src_dict.pad(),
eos_idx=self.eos,
left_pad_source=self.left_pad_source,
left_pad_target=self.left_pad_target,
input_feeding=self.input_feeding,
pad_to_length=pad_to_length,
pad_to_multiple=self.pad_to_multiple,
)
if self.src_lang_id is not None or self.tgt_lang_id is not None:
src_tokens = res["net_input"]["src_tokens"]
bsz = src_tokens.size(0)
if self.src_lang_id is not None:
res["net_input"]["src_lang_id"] = (
torch.LongTensor([[self.src_lang_id]]).expand(bsz, 1).to(src_tokens)
)
if self.tgt_lang_id is not None:
res["tgt_lang_id"] = (
torch.LongTensor([[self.tgt_lang_id]]).expand(bsz, 1).to(src_tokens)
)
return res
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
if self.src_with_nbest_werdur or self.src_with_werdur:
if self.src_with_nbest_werdur:
return max(
self.src_sizes[index] // 2 // self.src_with_nbest_werdur,
self.tgt_sizes[index] if self.tgt_sizes is not None else 0,
)
else:
return max(
self.src_sizes[index] // 2,
self.tgt_sizes[index] if self.tgt_sizes is not None else 0,
)
else:
return max(
self.src_sizes[index],
self.tgt_sizes[index] if self.tgt_sizes is not None else 0,
)
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return (
self.src_sizes[index],
self.tgt_sizes[index] if self.tgt_sizes is not None else 0,
)
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
indices = np.random.permutation(len(self)).astype(np.int64)
else:
indices = np.arange(len(self), dtype=np.int64)
if self.buckets is None:
# sort by target length, then source length
if self.tgt_sizes is not None:
indices = indices[np.argsort(self.tgt_sizes[indices], kind="mergesort")]
return indices[np.argsort(self.src_sizes[indices], kind="mergesort")]
else:
# sort by bucketed_num_tokens, which is:
# max(padded_src_len, padded_tgt_len)
return indices[
np.argsort(self.bucketed_num_tokens[indices], kind="mergesort")
]
@property
def supports_prefetch(self):
return getattr(self.src, "supports_prefetch", False) and (
getattr(self.tgt, "supports_prefetch", False) or self.tgt is None
)
def prefetch(self, indices):
self.src.prefetch(indices)
if self.tgt is not None:
self.tgt.prefetch(indices)
if self.align_dataset is not None:
self.align_dataset.prefetch(indices)
def filter_indices_by_size(self, indices, max_sizes):
"""Filter a list of sample indices. Remove those that are longer
than specified in max_sizes.
Args:
indices (np.array): original array of sample indices
max_sizes (int or list[int] or tuple[int]): max sample size,
can be defined separately for src and tgt (then list or tuple)
Returns:
np.array: filtered sample array
list: list of removed indices
"""
return data_utils.filter_paired_dataset_indices_by_size(
self.src_sizes,
self.tgt_sizes,
indices,
max_sizes,
)
| 50,130 | 42.974561 | 211 | py |
NeuralSpeech | NeuralSpeech-master/FastCorrect2/FC_utils/hub_utils_fc.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import logging
import os
from typing import Any, Dict, Iterator, List, Tuple
import torch
from fairseq import utils
from fairseq.data import encoders
from torch import nn
import time
logger = logging.getLogger(__name__)
def from_pretrained(
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
archive_map=None,
**kwargs
):
from fairseq import checkpoint_utils, file_utils
if archive_map is not None:
if model_name_or_path in archive_map:
model_name_or_path = archive_map[model_name_or_path]
if data_name_or_path is not None and data_name_or_path in archive_map:
data_name_or_path = archive_map[data_name_or_path]
# allow archive_map to set default arg_overrides (e.g., tokenizer, bpe)
# for each model
if isinstance(model_name_or_path, dict):
for k, v in model_name_or_path.items():
if k == "checkpoint_file":
checkpoint_file = v
elif (
k != "path"
# only set kwargs that don't already have overrides
and k not in kwargs
):
kwargs[k] = v
model_name_or_path = model_name_or_path["path"]
model_path = file_utils.load_archive_file(model_name_or_path)
# convenience hack for loading data and BPE codes from model archive
if data_name_or_path.startswith("."):
kwargs["data"] = os.path.abspath(os.path.join(model_path, data_name_or_path))
else:
kwargs["data"] = file_utils.load_archive_file(data_name_or_path)
for file, arg in {
"code": "bpe_codes",
"bpecodes": "bpe_codes",
"sentencepiece.bpe.model": "sentencepiece_model",
}.items():
path = os.path.join(model_path, file)
if os.path.exists(path):
kwargs[arg] = path
if "user_dir" in kwargs:
utils.import_user_module(argparse.Namespace(user_dir=kwargs["user_dir"]))
models, args, task = checkpoint_utils.load_model_ensemble_and_task(
[os.path.join(model_path, cpt) for cpt in checkpoint_file.split(os.pathsep)],
arg_overrides=kwargs,
)
return {
"args": args,
"task": task,
"models": models,
}
class GeneratorHubInterface(nn.Module):
"""
PyTorch Hub interface for generating sequences from a pre-trained
translation or language model.
"""
def __init__(self, args, task, models):
super().__init__()
self.args = args
self.task = task
self.models = nn.ModuleList(models)
self.src_dict = task.source_dictionary
self.tgt_dict = task.target_dictionary
# optimize model for generation
for model in self.models:
model.prepare_for_inference_(args)
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
self.align_dict = utils.load_align_dict(getattr(args, "replace_unk", None))
self.tokenizer = encoders.build_tokenizer(args)
self.bpe = encoders.build_bpe(args)
self.max_positions = utils.resolve_max_positions(
self.task.max_positions(), *[model.max_positions() for model in models]
)
# this is useful for determining the device
self.register_buffer("_float_tensor", torch.tensor([0], dtype=torch.float))
@property
def device(self):
return self._float_tensor.device
def translate(
self, sentences: List[str], beam: int = 5, verbose: bool = False, **kwargs
) -> List[str]:
return self.sample(sentences, beam, verbose, **kwargs)
def sample(
self, sentences: List[str], beam: int = 1, verbose: bool = False, **kwargs
) -> List[str]:
if isinstance(sentences, str):
exc_text, exc_time = self.sample([sentences], beam=beam, verbose=verbose, **kwargs)
return exc_text[0], exc_time
tokenized_sentences = [self.encode(sentence) for sentence in sentences]
batched_hypos, exc_time = self.generate(tokenized_sentences, beam, verbose, **kwargs)
return [self.decode(hypos[0]["tokens"]) for hypos in batched_hypos], exc_time
def score(self, sentences: List[str], **kwargs):
if isinstance(sentences, str):
return self.score([sentences], **kwargs)[0]
# NOTE: this doesn't support translation tasks currently
tokenized_sentences = [self.encode(sentence) for sentence in sentences]
return [
hypos[0]
for hypos in self.generate(
tokenized_sentences, score_reference=True, **kwargs
)
]
def generate(
self,
tokenized_sentences: List[torch.LongTensor],
beam: int = 5,
verbose: bool = False,
skip_invalid_size_inputs=False,
inference_step_args=None,
nbest_infer = 0,
nbest_infer_type="predict",
break_alignment=False,
copy_beam1=False,
**kwargs
) -> List[List[Dict[str, torch.Tensor]]]:
if torch.is_tensor(tokenized_sentences) and tokenized_sentences.dim() == 1:
return self.generate(
tokenized_sentences.unsqueeze(0), beam=beam, verbose=verbose, nbest_infer=nbest_infer, nbest_infer_type=nbest_infer_type, **kwargs
)[0]
# build generator using current args as well as any kwargs
gen_args = copy.copy(self.args)
gen_args.beam = beam
for k, v in kwargs.items():
setattr(gen_args, k, v)
generator = self.task.build_generator(self.models, gen_args)
inference_step_args = inference_step_args or {}
results = []
begin_time_exc = 0.0
for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs, nbest_infer):
batch = utils.apply_to_sample(lambda t: t.to(self.device), batch)
assert begin_time_exc == 0.0
begin_time_exc = time.time()
# print("Input shape:", batch["net_input"]["src_tokens"].shape)
# print("Begin:", time.time())
translations = self.task.inference_step(
generator, self.models, batch, nbest_infer_type=nbest_infer_type, **inference_step_args
)
# print("End:", time.time())
exc_time = time.time() - begin_time_exc
for id, hypos in zip(batch["id"].tolist(), translations):
results.append((id, hypos))
# sort output to match input order
outputs = [hypos for _, hypos in sorted(results, key=lambda x: x[0])]
if verbose:
def getarg(name, default):
return getattr(gen_args, name, getattr(self.args, name, default))
for source_tokens, target_hypotheses in zip(tokenized_sentences, outputs):
src_str_with_unk = self.string(source_tokens)
logger.info("S\t{}".format(src_str_with_unk))
for hypo in target_hypotheses:
hypo_str = self.decode(hypo["tokens"])
logger.info("H\t{}\t{}".format(hypo["score"], hypo_str))
logger.info(
"P\t{}".format(
" ".join(
map(
lambda x: "{:.4f}".format(x),
hypo["positional_scores"].tolist(),
)
)
)
)
if hypo["alignment"] is not None and getarg(
"print_alignment", False
):
logger.info(
"A\t{}".format(
" ".join(
[
"{}-{}".format(src_idx, tgt_idx)
for src_idx, tgt_idx in hypo["alignment"]
]
)
)
)
return outputs, exc_time
def encode(self, sentence: str) -> torch.LongTensor:
sentence = self.tokenize(sentence)
sentence = self.apply_bpe(sentence)
return self.binarize(sentence)
def decode(self, tokens: torch.LongTensor) -> str:
sentence = self.string(tokens)
sentence = self.remove_bpe(sentence)
return self.detokenize(sentence)
def tokenize(self, sentence: str) -> str:
if self.tokenizer is not None:
sentence = self.tokenizer.encode(sentence)
return sentence
def detokenize(self, sentence: str) -> str:
if self.tokenizer is not None:
sentence = self.tokenizer.decode(sentence)
return sentence
def apply_bpe(self, sentence: str) -> str:
if self.bpe is not None:
sentence = self.bpe.encode(sentence)
return sentence
def remove_bpe(self, sentence: str) -> str:
if self.bpe is not None:
sentence = self.bpe.decode(sentence)
return sentence
def binarize(self, sentence: str) -> torch.LongTensor:
return self.src_dict.encode_line(sentence, add_if_not_exist=False).long()
def string(self, tokens: torch.LongTensor) -> str:
return self.tgt_dict.string(tokens)
def _build_batches(
self, tokens: List[List[int]], skip_invalid_size_inputs: bool, nbest_infer: int,
) -> Iterator[Dict[str, Any]]:
lengths = torch.LongTensor([t.numel() for t in tokens])
batch_iterator = self.task.get_batch_iterator(
dataset=self.task.build_dataset_for_inference(tokens, lengths, nbest_infer),
max_tokens=self.args.max_tokens,
max_sentences=self.args.batch_size,
max_positions=self.max_positions,
ignore_invalid_inputs=skip_invalid_size_inputs,
disable_iterator_cache=True,
).next_epoch_itr(shuffle=False)
return batch_iterator
class BPEHubInterface(object):
"""PyTorch Hub interface for Byte-Pair Encoding (BPE)."""
def __init__(self, bpe, **kwargs):
super().__init__()
args = argparse.Namespace(bpe=bpe, **kwargs)
self.bpe = encoders.build_bpe(args)
assert self.bpe is not None
def encode(self, sentence: str) -> str:
return self.bpe.encode(sentence)
def decode(self, sentence: str) -> str:
return self.bpe.decode(sentence)
class TokenizerHubInterface(object):
"""PyTorch Hub interface for tokenization."""
def __init__(self, tokenizer, **kwargs):
super().__init__()
args = argparse.Namespace(tokenizer=tokenizer, **kwargs)
self.tokenizer = encoders.build_tokenizer(args)
assert self.tokenizer is not None
def encode(self, sentence: str) -> str:
return self.tokenizer.encode(sentence)
def decode(self, sentence: str) -> str:
return self.tokenizer.decode(sentence)
| 11,447 | 35.810289 | 146 | py |
NeuralSpeech | NeuralSpeech-master/FastCorrect2/FC_utils/binarizer_fc.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from collections import Counter
import torch
from fairseq.file_io import PathManager
from fairseq.tokenizer import tokenize_line
def safe_readline(f):
pos = f.tell()
while True:
try:
return f.readline()
except UnicodeDecodeError:
pos -= 1
f.seek(pos) # search where this character begins
class Binarizer:
@staticmethod
def binarize(
filename,
dict,
consumer,
tokenize=tokenize_line,
append_eos=True,
reverse_order=False,
offset=0,
end=-1,
already_numberized=False,
src_with_werdur=False,
src_with_nbest_werdur=0,
):
nseq, ntok = 0, 0
replaced = Counter()
def replaced_consumer(word, idx):
if idx == dict.unk_index and word != dict.unk_word:
replaced.update([word])
with open(PathManager.get_local_path(filename), "r", encoding="utf-8") as f:
f.seek(offset)
# next(f) breaks f.tell(), hence readline() must be used
line = safe_readline(f)
while line:
if end > 0 and f.tell() > end:
break
if already_numberized:
assert ' |||| ' not in line, "This constraint is add when doing asr correction exp"
id_strings = line.strip().split()
id_list = [int(id_string) for id_string in id_strings]
if reverse_order:
id_list.reverse()
if append_eos:
id_list.append(dict.eos())
ids = torch.IntTensor(id_list)
else:
if ' |||| ' in line and ' ||| ' in line:
assert src_with_nbest_werdur
assert not src_with_werdur
# print(line)
line, werdur_info = line.split(' |||| ')
nbest_line = line.split(' ||| ')
nbest_werdur_info = werdur_info.split(' ||| ')
assert len(nbest_line) == len(nbest_werdur_info) == src_with_nbest_werdur
line = " ".join([iter_nbest.strip().rsplit(' ', 1)[0] for iter_nbest in nbest_line])
werdur_info = " ".join([iter_werdur.strip() + ' ' + iter_line.strip().rsplit(' ', 1)[1] for iter_line, iter_werdur in zip(nbest_line, nbest_werdur_info)])
# print(line)
# print(werdur_info)
werdur_list = []
for i in werdur_info.strip().split():
assert abs(int(i)) < 30000
werdur_list.append(int(i) + 32768)
if append_eos:
werdur_list.append(1 + 32768)
werdur_list_length = len(werdur_list)
elif ' |||| ' in line:
assert src_with_werdur
assert not src_with_nbest_werdur
line, werdur_info = line.split(' |||| ')
werdur_list = []
for i in werdur_info.strip().split():
assert abs(int(i)) < 30000
werdur_list.append(int(i) + 32768)
if append_eos:
werdur_list.append(1 + 32768)
werdur_list_length = len(werdur_list)
else:
werdur_list = None
ids = dict.encode_line(
line=line,
line_tokenizer=tokenize,
add_if_not_exist=False,
consumer=replaced_consumer,
append_eos=append_eos,
reverse_order=reverse_order,
)
# print(ids)
if werdur_list is not None:
assert werdur_list_length == len(ids) + src_with_nbest_werdur
ids = torch.cat([ids, torch.IntTensor(werdur_list)], dim=-1)
nseq += 1
ntok += len(ids)
consumer(ids)
line = f.readline()
return {
"nseq": nseq,
"nunk": sum(replaced.values()),
"ntok": ntok,
"replaced": replaced,
}
@staticmethod
def binarize_alignments(filename, alignment_parser, consumer, offset=0, end=-1):
nseq = 0
with open(PathManager.get_local_path(filename), "r") as f:
f.seek(offset)
line = safe_readline(f)
while line:
if end > 0 and f.tell() > end:
break
ids = alignment_parser(line)
nseq += 1
consumer(ids)
line = f.readline()
return {"nseq": nseq}
@staticmethod
def find_offsets(filename, num_chunks):
with open(PathManager.get_local_path(filename), "r", encoding="utf-8") as f:
size = os.fstat(f.fileno()).st_size
chunk_size = size // num_chunks
offsets = [0 for _ in range(num_chunks + 1)]
for i in range(1, num_chunks):
f.seek(chunk_size * i)
safe_readline(f)
offsets[i] = f.tell()
return offsets
| 5,757 | 37.386667 | 178 | py |
NeuralSpeech | NeuralSpeech-master/FastCorrect2/FC_utils/options_fc.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from typing import Callable, List, Optional
import torch
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass.data_class import (
CheckpointParams,
CommonEvalParams,
CommonParams,
DatasetParams,
DistributedTrainingParams,
EvalLMParams,
OptimizationParams,
)
from fairseq.dataclass.utils import gen_parser_from_dataclass
# this import is for backward compatibility
from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list # noqa
def get_preprocessing_parser(default_task="translation"):
parser = get_parser("Preprocessing", default_task)
add_preprocess_args(parser)
return parser
def get_training_parser(default_task="translation"):
parser = get_parser("Trainer", default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser)
add_model_args(parser)
add_optimization_args(parser)
add_checkpoint_args(parser)
return parser
def get_generation_parser(interactive=False, default_task="translation"):
parser = get_parser("Generation", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_generation_args(parser)
if interactive:
add_interactive_args(parser)
return parser
def get_interactive_generation_parser(default_task="translation"):
return get_generation_parser(interactive=True, default_task=default_task)
def get_eval_lm_parser(default_task="language_modeling"):
parser = get_parser("Evaluate Language Model", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_eval_lm_args(parser)
return parser
def get_validation_parser(default_task=None):
parser = get_parser("Validation", default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser, default_world_size=1)
group = parser.add_argument_group("Evaluation")
gen_parser_from_dataclass(group, CommonEvalParams())
return parser
def parse_args_and_arch(
parser: argparse.ArgumentParser,
input_args: List[str] = None,
parse_known: bool = False,
suppress_defaults: bool = False,
modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None,
):
"""
Args:
parser (ArgumentParser): the parser
input_args (List[str]): strings to parse, defaults to sys.argv
parse_known (bool): only parse known arguments, similar to
`ArgumentParser.parse_known_args`
suppress_defaults (bool): parse while ignoring all default values
modify_parser (Optional[Callable[[ArgumentParser], None]]):
function to modify the parser, e.g., to set default values
"""
if suppress_defaults:
# Parse args without any default values. This requires us to parse
# twice, once to identify all the necessary task/model args, and a second
# time with all defaults set to None.
args = parse_args_and_arch(
parser,
input_args=input_args,
parse_known=parse_known,
suppress_defaults=False,
)
suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])
suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})
args = suppressed_parser.parse_args(input_args)
return argparse.Namespace(
**{k: v for k, v in vars(args).items() if v is not None}
)
from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY, MODEL_REGISTRY
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args(input_args)
utils.import_user_module(usr_args)
if modify_parser is not None:
modify_parser(parser)
# The parser doesn't know about model/criterion/optimizer-specific args, so
# we parse twice. First we parse the model/criterion/optimizer, then we
# parse a second time after adding the *-specific arguments.
# If input_args is given, we will parse those args instead of sys.argv.
args, _ = parser.parse_known_args(input_args)
# Add model-specific args to parser.
if hasattr(args, "arch"):
model_specific_group = parser.add_argument_group(
"Model-specific configuration",
# Only include attributes which are explicitly given as command-line
# arguments or which have default values.
argument_default=argparse.SUPPRESS,
)
if args.arch in ARCH_MODEL_REGISTRY:
ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
elif args.arch in MODEL_REGISTRY:
MODEL_REGISTRY[args.arch].add_args(model_specific_group)
else:
raise RuntimeError()
# Add *-specific args to parser.
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
choice = getattr(args, registry_name, None)
if choice is not None:
cls = REGISTRY["registry"][choice]
if hasattr(cls, "add_args"):
cls.add_args(parser)
if hasattr(args, "task"):
from fairseq.tasks import TASK_REGISTRY
TASK_REGISTRY[args.task].add_args(parser)
if getattr(args, "use_bmuf", False):
# hack to support extra args for block distributed data parallelism
from fairseq.optim.bmuf import FairseqBMUF
FairseqBMUF.add_args(parser)
# Modify the parser a second time, since defaults may have been reset
if modify_parser is not None:
modify_parser(parser)
# Parse a second time.
if parse_known:
args, extra = parser.parse_known_args(input_args)
else:
args = parser.parse_args(input_args)
extra = None
# Post-process args.
if (
hasattr(args, "batch_size_valid") and args.batch_size_valid is None
) or not hasattr(args, "batch_size_valid"):
args.batch_size_valid = args.batch_size
if hasattr(args, "max_tokens_valid") and args.max_tokens_valid is None:
args.max_tokens_valid = args.max_tokens
if getattr(args, "memory_efficient_fp16", False):
args.fp16 = True
if getattr(args, "memory_efficient_bf16", False):
args.bf16 = True
args.tpu = getattr(args, "tpu", False)
args.bf16 = getattr(args, "bf16", False)
if args.bf16:
args.tpu = True
if args.tpu and args.fp16:
raise ValueError("Cannot combine --fp16 and --tpu, use --bf16 on TPUs")
if getattr(args, "seed", None) is None:
args.seed = 1 # default seed for training
args.no_seed_provided = True
else:
args.no_seed_provided = False
# Apply architecture configuration.
if hasattr(args, "arch") and args.arch in ARCH_CONFIG_REGISTRY:
ARCH_CONFIG_REGISTRY[args.arch](args)
if parse_known:
return args, extra
else:
return args
def get_parser(desc, default_task="translation"):
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args()
utils.import_user_module(usr_args)
parser = argparse.ArgumentParser(allow_abbrev=False)
gen_parser_from_dataclass(parser, CommonParams())
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
parser.add_argument(
"--" + registry_name.replace("_", "-"),
default=REGISTRY["default"],
choices=REGISTRY["registry"].keys(),
)
# Task definitions can be found under fairseq/tasks/
from fairseq.tasks import TASK_REGISTRY
parser.add_argument(
"--task",
metavar="TASK",
default=default_task,
choices=TASK_REGISTRY.keys(),
help="task",
)
# fmt: on
return parser
def add_preprocess_args(parser):
group = parser.add_argument_group("Preprocessing")
# fmt: off
group.add_argument("-s", "--source-lang", default=None, metavar="SRC",
help="source language")
group.add_argument("-t", "--target-lang", default=None, metavar="TARGET",
help="target language")
group.add_argument("--trainpref", metavar="FP", default=None,
help="train file prefix")
group.add_argument("--validpref", metavar="FP", default=None,
help="comma separated, valid file prefixes")
group.add_argument("--testpref", metavar="FP", default=None,
help="comma separated, test file prefixes")
group.add_argument("--align-suffix", metavar="FP", default=None,
help="alignment file suffix")
group.add_argument("--destdir", metavar="DIR", default="data-bin",
help="destination dir")
group.add_argument("--thresholdtgt", metavar="N", default=0, type=int,
help="map words appearing less than threshold times to unknown")
group.add_argument("--thresholdsrc", metavar="N", default=0, type=int,
help="map words appearing less than threshold times to unknown")
group.add_argument("--tgtdict", metavar="FP",
help="reuse given target dictionary")
group.add_argument("--srcdict", metavar="FP",
help="reuse given source dictionary")
group.add_argument("--nwordstgt", metavar="N", default=-1, type=int,
help="number of target words to retain")
group.add_argument("--nwordssrc", metavar="N", default=-1, type=int,
help="number of source words to retain")
group.add_argument("--alignfile", metavar="ALIGN", default=None,
help="an alignment file (optional)")
parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap',
choices=get_available_dataset_impl(),
help='output dataset implementation')
group.add_argument("--joined-dictionary", action="store_true",
help="Generate joined dictionary")
group.add_argument("--only-source", action="store_true",
help="Only process the source language")
group.add_argument("--padding-factor", metavar="N", default=8, type=int,
help="Pad dictionary size to be multiple of N")
group.add_argument("--workers", metavar="N", default=1, type=int,
help="number of parallel workers")
group.add_argument("--src-with-werdur", action="store_true", default=False,
help="whether the src file contains werdur-info")
group.add_argument("--src-with-nbest-werdur", metavar="N", default=0, type=int,
help="whether the src file contains nbest werdur-info and how many nbest")
group.add_argument("--pos-before-reshape", action="store_true", default=False,
help="whether apply pos embedding before reshape")
# fmt: on
return parser
def add_dataset_args(parser, train=False, gen=False):
group = parser.add_argument_group("dataset_data_loading")
gen_parser_from_dataclass(group, DatasetParams())
# fmt: on
return group
def add_distributed_training_args(parser, default_world_size=None):
group = parser.add_argument_group("distributed_training")
if default_world_size is None:
default_world_size = max(1, torch.cuda.device_count())
gen_parser_from_dataclass(
group, DistributedTrainingParams(distributed_world_size=default_world_size)
)
return group
def add_optimization_args(parser):
group = parser.add_argument_group("optimization")
# fmt: off
gen_parser_from_dataclass(group, OptimizationParams())
# fmt: on
return group
def add_checkpoint_args(parser):
group = parser.add_argument_group("checkpoint")
# fmt: off
gen_parser_from_dataclass(group, CheckpointParams())
# fmt: on
return group
def add_common_eval_args(group):
gen_parser_from_dataclass(group, CommonEvalParams())
def add_eval_lm_args(parser):
group = parser.add_argument_group("LM Evaluation")
add_common_eval_args(group)
gen_parser_from_dataclass(group, EvalLMParams())
def add_generation_args(parser):
group = parser.add_argument_group("Generation")
add_common_eval_args(group)
# fmt: off
group.add_argument('--beam', default=5, type=int, metavar='N',
help='beam size')
group.add_argument('--nbest', default=1, type=int, metavar='N',
help='number of hypotheses to output')
group.add_argument('--max-len-a', default=0, type=float, metavar='N',
help=('generate sequences of maximum length ax + b, '
'where x is the source length'))
group.add_argument('--max-len-b', default=200, type=int, metavar='N',
help=('generate sequences of maximum length ax + b, '
'where x is the source length'))
group.add_argument('--min-len', default=1, type=float, metavar='N',
help=('minimum generation length'))
group.add_argument('--match-source-len', default=False, action='store_true',
help=('generations should match the source length'))
group.add_argument('--no-early-stop', action='store_true',
help='deprecated')
group.add_argument('--unnormalized', action='store_true',
help='compare unnormalized hypothesis scores')
group.add_argument('--no-beamable-mm', action='store_true',
help='don\'t use BeamableMM in attention layers')
group.add_argument('--lenpen', default=1, type=float,
help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences')
group.add_argument('--unkpen', default=0, type=float,
help='unknown word penalty: <0 produces more unks, >0 produces fewer')
group.add_argument('--replace-unk', nargs='?', const=True, default=None,
help='perform unknown replacement (optionally with alignment dictionary)')
group.add_argument('--sacrebleu', action='store_true',
help='score with sacrebleu')
group.add_argument('--score-reference', action='store_true',
help='just score the reference translation')
group.add_argument('--prefix-size', default=0, type=int, metavar='PS',
help='initialize generation by target prefix of given length')
group.add_argument('--no-repeat-ngram-size', default=0, type=int, metavar='N',
help='ngram blocking such that this size ngram cannot be repeated in the generation')
group.add_argument('--sampling', action='store_true',
help='sample hypotheses instead of using beam search')
group.add_argument('--sampling-topk', default=-1, type=int, metavar='PS',
help='sample from top K likely next words instead of all words')
group.add_argument('--sampling-topp', default=-1.0, type=float, metavar='PS',
help='sample from the smallest set whose cumulative probability mass exceeds p for next words')
group.add_argument('--constraints', const="ordered", nargs="?", choices=["ordered", "unordered"],
help='enables lexically constrained decoding')
group.add_argument('--temperature', default=1., type=float, metavar='N',
help='temperature for generation')
group.add_argument('--diverse-beam-groups', default=-1, type=int, metavar='N',
help='number of groups for Diverse Beam Search')
group.add_argument('--diverse-beam-strength', default=0.5, type=float, metavar='N',
help='strength of diversity penalty for Diverse Beam Search')
group.add_argument('--diversity-rate', default=-1.0, type=float, metavar='N',
help='strength of diversity penalty for Diverse Siblings Search')
group.add_argument('--print-alignment', action='store_true',
help='if set, uses attention feedback to compute and print alignment to source tokens')
group.add_argument('--print-step', action='store_true')
group.add_argument('--lm-path', default=None, type=str, metavar='PATH',
help='path to lm checkpoint for lm fusion')
group.add_argument('--lm-weight', default=0.0, type=float, metavar='N',
help='weight for lm probs for lm fusion')
# arguments for iterative refinement generator
group.add_argument('--iter-decode-eos-penalty', default=0.0, type=float, metavar='N',
help='if > 0.0, it penalized early-stopping in decoding.')
group.add_argument('--iter-decode-max-iter', default=10, type=int, metavar='N',
help='maximum iterations for iterative refinement.')
group.add_argument('--iter-decode-force-max-iter', action='store_true',
help='if set, run exact the maximum number of iterations without early stop')
group.add_argument('--iter-decode-with-beam', default=1, type=int, metavar='N',
help='if > 1, model will generate translations varying by the lengths.')
group.add_argument('--iter-decode-with-external-reranker', action='store_true',
help='if set, the last checkpoint are assumed to be a reranker to rescore the translations'),
group.add_argument('--retain-iter-history', action='store_true',
help='if set, decoding returns the whole history of iterative refinement')
group.add_argument('--retain-dropout', action='store_true',
help='Use dropout at inference time')
group.add_argument('--retain-dropout-modules', default=None, nargs='+', type=str,
help='if set, only retain dropout for the specified modules; '
'if not set, then dropout will be retained for all modules')
# special decoding format for advanced decoding.
group.add_argument('--decoding-format', default=None, type=str, choices=['unigram', 'ensemble', 'vote', 'dp', 'bs'])
# fmt: on
return group
def add_interactive_args(parser):
group = parser.add_argument_group("Interactive")
# fmt: off
group.add_argument('--buffer-size', default=0, type=int, metavar='N',
help='read this many sentences into a buffer before processing them')
group.add_argument('--input', default='-', type=str, metavar='FILE',
help='file to read from; use - for stdin')
# fmt: on
def add_model_args(parser):
group = parser.add_argument_group("Model configuration")
# fmt: off
# Model definitions can be found under fairseq/models/
#
# The model architecture can be specified in several ways.
# In increasing order of priority:
# 1) model defaults (lowest priority)
# 2) --arch argument
# 3) --encoder/decoder-* arguments (highest priority)
from fairseq.models import ARCH_MODEL_REGISTRY
group.add_argument('--arch', '-a', metavar='ARCH',
choices=ARCH_MODEL_REGISTRY.keys(),
help='model architecture')
# fmt: on
return group | 20,160 | 43.802222 | 120 | py |
NeuralSpeech | NeuralSpeech-master/FastCorrect2/FC_utils/fastcorrect_generator.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import namedtuple
import numpy as np
import torch
from fairseq import utils
import time
import torch.nn.functional as F
DecoderOut = namedtuple(
"FastCorrectDecoderOut",
["output_tokens", "output_scores", "attn", "step", "max_step", "history", "to_be_edited_pred", "wer_dur_pred"],
)
class FastCorrectGenerator(object):
def __init__(
self,
tgt_dict,
models=None,
eos_penalty=0.0,
max_iter=10,
max_ratio=2,
beam_size=1,
decoding_format=None,
retain_dropout=False,
adaptive=True,
retain_history=False,
reranking=False,
edit_thre=0.0,
print_werdur=False
):
"""
Generates translations based on iterative refinement.
Args:
tgt_dict: target dictionary
eos_penalty: if > 0.0, it penalized early-stopping in decoding
max_iter: maximum number of refinement iterations
max_ratio: generate sequences of maximum length ax, where x is the source length
decoding_format: decoding mode in {'unigram', 'ensemble', 'vote', 'dp', 'bs'}
retain_dropout: retaining dropout in the inference
adaptive: decoding with early stop
"""
self.bos = tgt_dict.bos()
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.eos_penalty = eos_penalty
self.max_iter = max_iter
self.max_ratio = max_ratio
self.beam_size = beam_size
self.reranking = reranking
self.decoding_format = decoding_format
self.retain_dropout = retain_dropout
self.retain_history = retain_history
self.adaptive = adaptive
self.models = models
self.edit_thre = edit_thre
self.print_werdur = print_werdur
def generate_batched_itr(
self,
data_itr,
maxlen_a=None,
maxlen_b=None,
cuda=False,
timer=None,
prefix_size=0,
):
"""Iterate over a batched dataset and yield individual translations.
Args:
maxlen_a/b: generate sequences of maximum length ax + b,
where x is the source sentence length.
cuda: use GPU for generation
timer: StopwatchMeter for timing generations.
"""
for sample in data_itr:
if "net_input" not in sample:
continue
if timer is not None:
timer.start()
with torch.no_grad():
hypos = self.generate(
self.models,
sample,
prefix_tokens=sample["target"][:, :prefix_size]
if prefix_size > 0
else None,
)
if timer is not None:
timer.stop(sample["ntokens"])
for i, id in enumerate(sample["id"]):
# remove padding
src = utils.strip_pad(sample["net_input"]["src_tokens"][i, :], self.pad)
ref = utils.strip_pad(sample["target"][i, :], self.pad)
yield id, src, ref, hypos[i]
@torch.no_grad()
def generate(self, models, sample, prefix_tokens=None, constraints=None, werdur_gt_str="", nbest_infer_type="predict"):
if constraints is not None:
raise NotImplementedError(
"Constrained decoding with the IterativeRefinementGenerator is not supported"
)
# TODO: iterative refinement generator does not support ensemble for now.
if not self.retain_dropout:
for model in models:
model.eval()
model, reranker = models[0], None
if self.reranking:
assert len(models) > 1, "Assuming the last checkpoint is the reranker"
assert (
self.beam_size > 1
), "Reranking requires multiple translation for each example"
reranker = models[-1]
models = models[:-1]
if len(models) > 1 and hasattr(model, "enable_ensemble"):
assert model.allow_ensemble, "{} does not support ensembling".format(
model.__class__.__name__
)
model.enable_ensemble(models)
# TODO: better encoder inputs?
src_tokens = sample["net_input"]["src_tokens"]
src_lengths = sample["net_input"]["src_lengths"]
bsz, src_len = src_tokens.size()[:2]
# initialize
# print("before encoder:", time.time())
encoder_out = model.forward_encoder([src_tokens, src_lengths])
# print("before werdur:", time.time())
if getattr(model.decoder, "wer_dur_weight", None) or getattr(model.decoder, "dur_predictor", None):
prev_decoder_out, encoder_out = model.initialize_output_tokens(encoder_out, src_tokens, self.edit_thre, self.print_werdur, werdur_gt_str=werdur_gt_str, nbest_infer_type=nbest_infer_type)
else:
#raise ValueError("Remove this after debugging")
prev_decoder_out, encoder_out = model.initialize_output_tokens(encoder_out, src_tokens)
# print("before decoder:", time.time())
if self.beam_size > 1:
assert (
model.allow_length_beam
), "{} does not support decoding with length beam.".format(
model.__class__.__name__
)
# regenerate data based on length-beam
length_beam_order = (
utils.new_arange(src_tokens, self.beam_size, bsz).t().reshape(-1)
)
encoder_out = model.encoder.reorder_encoder_out(
encoder_out, length_beam_order
)
prev_decoder_out = model.regenerate_length_beam(
prev_decoder_out, self.beam_size
)
bsz = bsz * self.beam_size
sent_idxs = torch.arange(bsz)
prev_output_tokens = prev_decoder_out.output_tokens.clone()
if self.retain_history:
prev_decoder_out = prev_decoder_out._replace(history=[prev_output_tokens])
finalized = [[] for _ in range(bsz)]
def is_a_loop(x, y, s, a):
b, l_x, l_y = x.size(0), x.size(1), y.size(1)
if l_x > l_y:
y = torch.cat([y, x.new_zeros(b, l_x - l_y).fill_(self.pad)], 1)
s = torch.cat([s, s.new_zeros(b, l_x - l_y)], 1)
if a is not None:
a = torch.cat([a, a.new_zeros(b, l_x - l_y, a.size(2))], 1)
elif l_x < l_y:
x = torch.cat([x, y.new_zeros(b, l_y - l_x).fill_(self.pad)], 1)
return (x == y).all(1), y, s, a
def finalized_hypos(step, prev_out_token, prev_out_score, prev_out_attn):
cutoff = prev_out_token.ne(self.pad)
tokens = prev_out_token[cutoff]
if prev_out_score is None:
scores, score = None, None
else:
scores = prev_out_score[cutoff]
score = scores.mean()
if prev_out_attn is None:
hypo_attn, alignment = None, None
else:
hypo_attn = prev_out_attn[cutoff]
alignment = hypo_attn.max(dim=1)[1]
return {
"steps": step,
"tokens": tokens,
"positional_scores": scores,
"score": score,
"hypo_attn": hypo_attn,
"alignment": alignment,
}
for step in range(self.max_iter + 1):
decoder_options = {
"eos_penalty": self.eos_penalty,
"max_ratio": self.max_ratio,
"decoding_format": self.decoding_format,
}
prev_decoder_out = prev_decoder_out._replace(
step=step,
max_step=self.max_iter + 1,
)
decoder_out = model.forward_decoder(
prev_decoder_out, encoder_out, **decoder_options
)
if self.adaptive:
# terminate if there is a loop
terminated, out_tokens, out_scores, out_attn = is_a_loop(
prev_output_tokens,
decoder_out.output_tokens,
decoder_out.output_scores,
decoder_out.attn,
)
decoder_out = decoder_out._replace(
output_tokens=out_tokens,
output_scores=out_scores,
attn=out_attn,
)
else:
terminated = decoder_out.output_tokens.new_zeros(
decoder_out.output_tokens.size(0)
).bool()
if step == self.max_iter: # reach last iteration, terminate
terminated.fill_(1)
# collect finalized sentences
finalized_idxs = sent_idxs[terminated]
finalized_tokens = decoder_out.output_tokens[terminated]
finalized_scores = decoder_out.output_scores[terminated]
finalized_attn = (
None
if (decoder_out.attn is None or decoder_out.attn.size(0) == 0)
else decoder_out.attn[terminated]
)
if self.retain_history:
finalized_history_tokens = [h[terminated] for h in decoder_out.history]
for i in range(finalized_idxs.size(0)):
finalized[finalized_idxs[i]] = [
finalized_hypos(
step,
finalized_tokens[i],
finalized_scores[i],
None if finalized_attn is None else finalized_attn[i],
)
]
if self.retain_history:
finalized[finalized_idxs[i]][0]["history"] = []
for j in range(len(finalized_history_tokens)):
finalized[finalized_idxs[i]][0]["history"].append(
finalized_hypos(
step, finalized_history_tokens[j][i], None, None
)
)
# check if all terminated
if terminated.sum() == terminated.size(0):
break
# for next step
not_terminated = ~terminated
prev_decoder_out = decoder_out._replace(
output_tokens=decoder_out.output_tokens[not_terminated],
output_scores=decoder_out.output_scores[not_terminated],
attn=decoder_out.attn[not_terminated]
if (decoder_out.attn is not None and decoder_out.attn.size(0) > 0)
else None,
history=[h[not_terminated] for h in decoder_out.history]
if decoder_out.history is not None
else None,
)
encoder_out = model.encoder.reorder_encoder_out(
encoder_out, not_terminated.nonzero(as_tuple=False).squeeze()
)
sent_idxs = sent_idxs[not_terminated]
prev_output_tokens = prev_decoder_out.output_tokens.clone()
if self.beam_size > 1:
if reranker is not None:
finalized = self.rerank(
reranker, finalized, [src_tokens, src_lengths], self.beam_size
)
# aggregate information from length beam
finalized = [
finalized[
np.argmax(
[
finalized[self.beam_size * i + j][0]["score"]
for j in range(self.beam_size)
]
)
+ self.beam_size * i
]
for i in range(len(finalized) // self.beam_size)
]
return finalized
def rerank(self, reranker, finalized, encoder_input, beam_size):
def rebuild_batch(finalized):
finalized_tokens = [f[0]["tokens"] for f in finalized]
finalized_maxlen = max(f.size(0) for f in finalized_tokens)
final_output_tokens = (
finalized_tokens[0]
.new_zeros(len(finalized_tokens), finalized_maxlen)
.fill_(self.pad)
)
for i, f in enumerate(finalized_tokens):
final_output_tokens[i, : f.size(0)] = f
return final_output_tokens
final_output_tokens = rebuild_batch(finalized)
final_output_tokens[
:, 0
] = self.eos # autoregressive model assumes starting with EOS
reranker_encoder_out = reranker.encoder(*encoder_input)
length_beam_order = (
utils.new_arange(
final_output_tokens, beam_size, reranker_encoder_out.encoder_out.size(1)
)
.t()
.reshape(-1)
)
reranker_encoder_out = reranker.encoder.reorder_encoder_out(
reranker_encoder_out, length_beam_order
)
reranking_scores = reranker.get_normalized_probs(
reranker.decoder(final_output_tokens[:, :-1], reranker_encoder_out),
True,
None,
)
reranking_scores = reranking_scores.gather(2, final_output_tokens[:, 1:, None])
reranking_masks = final_output_tokens[:, 1:].ne(self.pad)
reranking_scores = (
reranking_scores[:, :, 0].masked_fill_(~reranking_masks, 0).sum(1)
)
reranking_scores = reranking_scores / reranking_masks.sum(1).type_as(
reranking_scores
)
for i in range(len(finalized)):
finalized[i][0]["score"] = reranking_scores[i]
return finalized
| 14,093 | 36.684492 | 198 | py |
NeuralSpeech | NeuralSpeech-master/FastCorrect2/FastCorrect/fastcorrect_task.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import itertools
import logging
logger = logging.getLogger(__name__)
import torch
from fairseq import utils
from fairseq.data import LanguagePairDataset
from fairseq.tasks import register_task
from fairseq.tasks.translation import TranslationTask
from fairseq.utils import new_arange
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
data_utils,
encoders,
indexed_dataset,
)
from language_pair_dataset import LanguagePairDataset
def load_langpair_dataset(
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
left_pad_source,
left_pad_target,
max_source_positions,
max_target_positions,
prepend_bos=False,
load_alignments=False,
truncate_source=False,
append_source_id=False,
num_buckets=0,
shuffle=True,
pad_to_multiple=1,
src_with_werdur=False,
append_eos_to_target=False,
src_with_nbest_werdur=0,
):
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
# infer langcode
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
src_dataset = data_utils.load_indexed_dataset(
prefix + src, src_dict, dataset_impl
)
if truncate_source:
src_dataset = AppendTokenDataset(
TruncateDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 1,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_dataset = data_utils.load_indexed_dataset(
prefix + tgt, tgt_dict, dataset_impl
)
if tgt_dataset is not None:
tgt_datasets.append(tgt_dataset)
logger.info(
"{} {} {}-{} {} examples".format(
data_path, split_k, src, tgt, len(src_datasets[-1])
)
)
if not combine:
break
assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
if len(src_datasets) == 1:
src_dataset = src_datasets[0]
tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
if len(tgt_datasets) > 0:
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
else:
tgt_dataset = None
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
if tgt_dataset is not None:
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
eos = None
if append_source_id:
src_dataset = AppendTokenDataset(
src_dataset, src_dict.index("[{}]".format(src))
)
if tgt_dataset is not None:
tgt_dataset = AppendTokenDataset(
tgt_dataset, tgt_dict.index("[{}]".format(tgt))
)
eos = tgt_dict.index("[{}]".format(tgt))
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(
align_path, None, dataset_impl
)
tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
return LanguagePairDataset(
src_dataset,
src_dataset.sizes,
src_dict,
tgt_dataset,
tgt_dataset_sizes,
tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
align_dataset=align_dataset,
eos=eos,
num_buckets=num_buckets,
shuffle=shuffle,
pad_to_multiple=pad_to_multiple,
src_with_werdur=src_with_werdur,
append_eos_to_target=append_eos_to_target,
bos_prepended_outside=prepend_bos,
src_with_nbest_werdur=src_with_nbest_werdur,
)
@register_task("fastcorrect")
class FastCorrectTask(TranslationTask):
"""
Translation (Sequence Generation) task for Levenshtein Transformer
See `"Levenshtein Transformer" <https://arxiv.org/abs/1905.11006>`_.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
TranslationTask.add_args(parser)
parser.add_argument(
'--noise',
default='random_delete',
choices=['random_delete', 'random_mask', 'no_noise', 'full_mask'])
parser.add_argument(
'--cal-wer-dur', action="store_true", default=False,
help='Whether to cal-wer-dur in dataset')
parser.add_argument(
'--use-wer-dur', action="store_true", default=False,
help='Whether to use wer dur in model')
parser.add_argument(
'--src-with-werdur', action="store_true", default=False,
help='Whether the werdur is in dataset')
parser.add_argument(
'--src-with-nbest-werdur', default=0, type=int,
help="whether the src file contains nbest werdur-info and how many nbest"
)
parser.add_argument(
'--closest-use-which', default='default', type=str,
help="the type of closest label type"
)
parser.add_argument("--pos-before-reshape", action="store_true", default=False,
help="whether apply pos embedding before reshape")
parser.add_argument(
'--use-soft-sigma', action="store_true", default=False,
help='Whether to use soft sigma'
)
parser.add_argument(
"--dur-predictor-type",
type=str,
default="",
help="dur-predictor-type",
)
parser.add_argument(
"--ngram-dur-predict",
type=int,
default=-1,
help="cal ngram dur predict loss",
)
parser.add_argument(
'--globel-dur-loss', action="store_true", default=False,
help='Whether cal global dur loss')
# fmt: on
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
self.datasets[split] = load_langpair_dataset(
data_path,
split,
src,
self.src_dict,
tgt,
self.tgt_dict,
combine=combine,
dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
prepend_bos=True,
src_with_werdur=self.args.src_with_werdur,
src_with_nbest_werdur=self.args.src_with_nbest_werdur,
append_eos_to_target=False, # add this although eos already add in data preprocess
)
def inject_noise(self, target_tokens):
def _random_delete(target_tokens):
pad = self.tgt_dict.pad()
bos = self.tgt_dict.bos()
eos = self.tgt_dict.eos()
max_len = target_tokens.size(1)
target_mask = target_tokens.eq(pad)
target_score = target_tokens.clone().float().uniform_()
target_score.masked_fill_(
target_tokens.eq(bos) | target_tokens.eq(eos), 0.0
)
target_score.masked_fill_(target_mask, 1)
target_score, target_rank = target_score.sort(1)
target_length = target_mask.size(1) - target_mask.float().sum(
1, keepdim=True
)
# do not delete <bos> and <eos> (we assign 0 score for them)
target_cutoff = (
2
+ (
(target_length - 2)
* target_score.new_zeros(target_score.size(0), 1).uniform_()
).long()
)
target_cutoff = target_score.sort(1)[1] >= target_cutoff
prev_target_tokens = (
target_tokens.gather(1, target_rank)
.masked_fill_(target_cutoff, pad)
.gather(1, target_rank.masked_fill_(target_cutoff, max_len).sort(1)[1])
)
prev_target_tokens = prev_target_tokens[
:, : prev_target_tokens.ne(pad).sum(1).max()
]
return prev_target_tokens
def _random_mask(target_tokens):
pad = self.tgt_dict.pad()
bos = self.tgt_dict.bos()
eos = self.tgt_dict.eos()
unk = self.tgt_dict.unk()
target_masks = (
target_tokens.ne(pad) & target_tokens.ne(bos) & target_tokens.ne(eos)
)
target_score = target_tokens.clone().float().uniform_()
target_score.masked_fill_(~target_masks, 2.0)
target_length = target_masks.sum(1).float()
target_length = target_length * target_length.clone().uniform_()
target_length = target_length + 1 # make sure to mask at least one token.
_, target_rank = target_score.sort(1)
target_cutoff = new_arange(target_rank) < target_length[:, None].long()
prev_target_tokens = target_tokens.masked_fill(
target_cutoff.scatter(1, target_rank, target_cutoff), unk
)
return prev_target_tokens
def _full_mask(target_tokens):
pad = self.tgt_dict.pad()
bos = self.tgt_dict.bos()
eos = self.tgt_dict.eos()
unk = self.tgt_dict.unk()
target_mask = (
target_tokens.eq(bos) | target_tokens.eq(eos) | target_tokens.eq(pad)
)
return target_tokens.masked_fill(~target_mask, unk)
if self.args.noise == "random_delete":
return _random_delete(target_tokens)
elif self.args.noise == "random_mask":
return _random_mask(target_tokens)
elif self.args.noise == "full_mask":
return _full_mask(target_tokens)
elif self.args.noise == "no_noise":
return target_tokens
else:
raise NotImplementedError
def build_generator(self, models, args, **unused):
# add models input to match the API for SequenceGenerator
from fastcorrect_generator import FastCorrectGenerator
# print("edit_thre:", getattr(args, "edit_thre", 0.0))
return FastCorrectGenerator(
self.target_dictionary,
eos_penalty=getattr(args, "iter_decode_eos_penalty", 0.0),
max_iter=getattr(args, "iter_decode_max_iter", 10),
beam_size=getattr(args, "iter_decode_with_beam", 1),
reranking=getattr(args, "iter_decode_with_external_reranker", False),
decoding_format=getattr(args, "decoding_format", None),
adaptive=not getattr(args, "iter_decode_force_max_iter", False),
retain_history=getattr(args, "retain_iter_history", False),
edit_thre=getattr(args, "edit_thre", 0.0),
print_werdur=getattr(args, "print_werdur", False),
retain_dropout=getattr(args, "retain_dropout", False)
)
def build_dataset_for_inference(self, src_tokens, src_lengths, nbest_infer=0, constraints=None):
if constraints is not None:
# Though see Susanto et al. (ACL 2020): https://www.aclweb.org/anthology/2020.acl-main.325/
raise NotImplementedError(
"Constrained decoding with the translation_lev task is not supported"
)
return LanguagePairDataset(
src_tokens, src_lengths, self.source_dictionary, nbest_infer=nbest_infer, append_bos=True,
)
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
model.train()
sample["prev_target"] = self.inject_noise(sample["target"])
loss, sample_size, logging_output = criterion(model, sample)
if ignore_grad:
loss *= 0
optimizer.backward(loss)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
sample["prev_target"] = self.inject_noise(sample["target"])
loss, sample_size, logging_output = criterion(model, sample)
return loss, sample_size, logging_output
def inference_step(
self, generator, models, sample, prefix_tokens=None, constraints=None, werdur_gt_str="", nbest_infer_type="",
):
with torch.no_grad():
return generator.generate(
models, sample, prefix_tokens=prefix_tokens, constraints=constraints, werdur_gt_str=werdur_gt_str, nbest_infer_type=nbest_infer_type)
| 14,497 | 35.427136 | 148 | py |
NeuralSpeech | NeuralSpeech-master/FastCorrect2/FastCorrect/fc_loss.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from torch import Tensor
@register_criterion("fc_loss")
class FastCorrectCriterion(FairseqCriterion):
def __init__(self, task, label_smoothing):
super().__init__(task)
self.label_smoothing = label_smoothing
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
parser.add_argument(
"--label-smoothing",
default=0.0,
type=float,
metavar="D",
help="epsilon for label smoothing, 0 means no label smoothing",
)
def _compute_loss(
self, outputs, targets, masks=None, label_smoothing=0.0, name="loss", factor=1.0
):
"""
outputs: batch x len x d_model
targets: batch x len
masks: batch x len
policy_logprob: if there is some policy
depends on the likelihood score as rewards.
"""
def mean_ds(x: Tensor, dim=None) -> Tensor:
return (
x.float().mean().type_as(x)
if dim is None
else x.float().mean(dim).type_as(x)
)
if masks is not None:
outputs, targets = outputs[masks], targets[masks]
if masks is not None and not masks.any():
nll_loss = torch.tensor(0)
loss = nll_loss
else:
logits = F.log_softmax(outputs, dim=-1)
if targets.dim() == 1:
losses = F.nll_loss(logits, targets.to(logits.device), reduction="none")
else: # soft-labels
losses = F.kl_div(logits, targets.to(logits.device), reduction="none")
losses = losses.sum(-1)
nll_loss = mean_ds(losses)
if label_smoothing > 0:
loss = (
nll_loss * (1 - label_smoothing) - mean_ds(logits) * label_smoothing
)
else:
loss = nll_loss
loss = loss * factor
return {"name": name, "loss": loss, "nll_loss": nll_loss, "factor": factor}
def _custom_loss(self, loss, nll_loss=None, name="loss", factor=1.0):
if nll_loss is not None:
return {"name": name, "loss": loss, "nll_loss": nll_loss, "factor": factor}
else:
return {"name": name, "loss": loss, "factor": factor}
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
nsentences, ntokens = sample["nsentences"], sample["ntokens"]
# B x T
src_tokens, src_lengths = (
sample["net_input"]["src_tokens"],
sample["net_input"]["src_lengths"],
)
if "wer_dur" in sample["net_input"].keys():
wer_dur = sample["net_input"]["wer_dur"]
to_be_edited = sample["net_input"]["to_be_edited"]
for_wer_gather = sample["net_input"]["for_wer_gather"]
closest_label = sample["net_input"].get("closest_label", None)
else:
wer_dur = None
to_be_edited = None
for_wer_gather = None
tgt_tokens, prev_output_tokens = sample["target"], sample["prev_target"]
''' for nbest condition
print("src_tokens[:4]:", src_tokens[:4].transpose(1,2), src_tokens.shape) # torch.Size([32, 44, 4])
print("src_lengths[:4]:", src_lengths[:4], src_lengths.shape) # torch.Size([32])
print("wer_dur[:4]:", wer_dur[:4].transpose(1,2), wer_dur.shape) # torch.Size([32, 44, 4])
print("to_be_edited[:4]:", to_be_edited[:4].transpose(1,2), to_be_edited.shape) # torch.Size([32, 44, 4])
print("for_wer_gather[:4]:", for_wer_gather[:4].transpose(1,2), for_wer_gather.shape) # torch.Size([32, 45, 4])
print("closest_label[:4]:", closest_label[:4], closest_label.shape) # torch.Size([32, 4])
assert 2 == 3
'''
outputs = model(src_tokens, src_lengths, prev_output_tokens, tgt_tokens, wer_dur, to_be_edited, for_wer_gather, closest_label)
losses, nll_loss = [], []
for obj in outputs:
if outputs[obj].get("loss", None) is None:
_losses = self._compute_loss(
outputs[obj].get("out"),
outputs[obj].get("tgt"),
outputs[obj].get("mask", None),
outputs[obj].get("ls", 0.0),
name=obj + "-loss",
factor=outputs[obj].get("factor", 1.0),
)
else:
_losses = self._custom_loss(
outputs[obj].get("loss"),
outputs[obj].get("nll_loss", None),
name=obj + "-loss",
factor=outputs[obj].get("factor", 1.0),
)
losses += [_losses]
if outputs[obj].get("nll_loss", False):
nll_loss += [_losses.get("nll_loss", 0.0)]
loss = sum(l["loss"] for l in losses)
nll_loss = sum(l for l in nll_loss) if len(nll_loss) > 0 else loss.new_tensor(0)
#print("loss, nll_loss", loss, nll_loss)
#for l in losses:
# print(l['name'], l['loss'], utils.item(l["loss"].data / l["factor"]))
# NOTE:
# we don't need to use sample_size as denominator for the gradient
# here sample_size is just used for logging
sample_size = 1
logging_output = {
"loss": loss.data,
"nll_loss": nll_loss.data,
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
for l in losses:
logging_output[l["name"]] = (
utils.item(l["loss"].data / l["factor"])
if reduce
else l[["loss"]].data / l["factor"]
)
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
loss = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
nll_loss = utils.item(sum(log.get("nll_loss", 0) for log in logging_outputs))
metrics.log_scalar(
"loss", loss / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar(
"nll_loss", nll_loss / sample_size / math.log(2), sample_size, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
)
for key in logging_outputs[0]:
if key[-5:] == "-loss":
val = sum(log.get(key, 0) for log in logging_outputs)
metrics.log_scalar(
key[:-5],
val / sample_size / math.log(2) if sample_size > 0 else 0.0,
sample_size,
round=3,
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
to True will improves distributed training speed.
"""
return True
| 7,908 | 37.207729 | 134 | py |
NeuralSpeech | NeuralSpeech-master/FastCorrect2/FastCorrect/fastcorrect_model.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
from fairseq import utils
from fastcorrect_generator import DecoderOut
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import FairseqNATDecoder, FairseqNATModel, ensemble_decoder, ensemble_encoder
from fairseq.models.transformer import Embedding
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.modules import (
FairseqDropout,
PositionalEmbedding,
)
from torch import Tensor
from fairseq.models.transformer import (
TransformerEncoder,
)
import torch.nn as nn
from typing import Any, Dict, List, Optional, Tuple
from fairseq.models.fairseq_encoder import EncoderOut
import logging
logger = logging.getLogger(__name__)
def Embeddingright(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
if padding_idx is not None:
nn.init.constant_(m.weight[padding_idx], 0)
return m
def _mean_pooling(enc_feats, src_masks):
# enc_feats: T x B x C
# src_masks: B x T or None
if src_masks is None:
enc_feats = enc_feats.mean(0)
else:
src_masks = (~src_masks).transpose(0, 1).type_as(enc_feats)
enc_feats = (
(enc_feats / src_masks.sum(0)[None, :, None]) * src_masks[:, :, None]
).sum(0)
return enc_feats
def _argmax(x, dim):
return (x == x.max(dim, keepdim=True)[0]).type_as(x)
@register_model("fastcorrect")
class FastCorrectModel(FairseqNATModel):
def __init__(self, args, encoder, decoder):
super().__init__(args, encoder, decoder)
self.to_be_edited_size = getattr(args, "to_be_edited_size", 1)
if getattr(args, 'assist_edit_loss', False):
print("add assist edit loss!")
self.assist_edit_loss = True
else:
self.assist_edit_loss = False
self.werdur_max_predict = getattr(args, 'werdur_max_predict', 5.0)
print("werdur_max_predict: ", self.werdur_max_predict)
self.werdur_loss_type = getattr(args, 'werdur_loss_type', 'l2')
print("werdur_loss_type: ", self.werdur_loss_type)
if self.werdur_loss_type == 'l2':
self.werdur_loss_func = F.mse_loss
elif self.werdur_loss_type == 'log_l2':
self.werdur_loss_func = self.log_mse_loss
elif self.werdur_loss_type == 'l1':
self.werdur_loss_func = F.l1_loss
elif self.werdur_loss_type == 'log_l1':
self.werdur_loss_func = self.log_l1_loss
else:
raise ValueError("Unsupported werdur_loss_type")
self.src_with_nbest_werdur = getattr(args, "src_with_nbest_werdur", 0)
self.closest_label_type = getattr(args, "closest_label_type", "random")
self.closest_use_which = getattr(args, "closest_use_which", "default")
self.encoder_lookup_size = getattr(args, "encoder_lookup_size", -1)
def log_mse_loss(self, hypo, ref, reduction='none'):
hypo = torch.exp(hypo) - 1.0
return F.mse_loss(hypo, ref, reduction=reduction)
def log_l1_loss(self, hypo, ref, reduction='none'):
hypo = torch.exp(hypo) - 1.0
return F.l1_loss(hypo, ref, reduction=reduction)
@property
def allow_length_beam(self):
return True
@staticmethod
def add_args(parser):
FairseqNATModel.add_args(parser)
# length prediction
parser.add_argument(
"--remove-edit-emb",
action="store_true",
default=False,
help="whether to remove edit emb",
)
parser.add_argument(
"--assist-edit-loss",
action="store_true",
default=False,
help="whether to use assist edit loss",
)
parser.add_argument(
"--sg-length-pred",
action="store_true",
help="stop the gradients back-propagated from the length predictor",
)
parser.add_argument(
"--length-loss-factor",
type=float,
help="weights on the length prediction loss",
)
parser.add_argument(
"--edit-emb-dim",
type=int,
help="dimension of edit emb",
)
parser.add_argument(
"--to-be-edited-size",
type=int,
help="size of to be edited (2 for edited or not, 4 or insert/delete/change/not do",
)
parser.add_argument(
"--werdur-max-predict",
type=float,
help="max value of werdur",
)
parser.add_argument(
"--werdur-loss-type",
type=str,
help="type of werdur loss",
)
parser.add_argument(
"--closest-label-type",
type=str,
default="random",
help="type of closest label for decoder",
)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
decoder = FastCorrectDecoder(args, tgt_dict, embed_tokens)
if getattr(args, "apply_bert_init", False):
decoder.apply(init_bert_params)
return decoder
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
encoder = FastCorrectEncoder(args, src_dict, embed_tokens)
if getattr(args, "apply_bert_init", False):
encoder.apply(init_bert_params)
return encoder
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
**kwargs,
):
"""
Load a :class:`~fairseq.models.FairseqModel` from a pre-trained model
file. Downloads and caches the pre-trained model file if needed.
The base implementation returns a
:class:`~fairseq.hub_utils.GeneratorHubInterface`, which can be used to
generate translations or sample from language models. The underlying
:class:`~fairseq.models.FairseqModel` can be accessed via the
*generator.models* attribute.
Other models may override this to implement custom hub interfaces.
Args:
model_name_or_path (str): either the name of a pre-trained model to
load or a path/URL to a pre-trained model state dict
checkpoint_file (str, optional): colon-separated list of checkpoint
files in the model archive to ensemble (default: 'model.pt')
data_name_or_path (str, optional): point args.data to the archive
at the given path/URL. Can start with '.' or './' to reuse the
model archive path.
"""
import hub_utils_fc
x = hub_utils_fc.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
**kwargs,
)
cls.upgrade_args(x["args"])
logger.info(x["args"])
return hub_utils_fc.GeneratorHubInterface(x["args"], x["task"], x["models"])
def _compute_nll_loss(
self, outputs, targets, masks=None, label_smoothing=0.0, name="loss", factor=1.0
):
"""
outputs: batch x len x d_model
targets: batch x len
masks: batch x len
policy_logprob: if there is some policy
depends on the likelihood score as rewards.
"""
def mean_ds(x: Tensor, dim=None) -> Tensor:
return (
x.float().mean().type_as(x)
if dim is None
else x.float().mean(dim).type_as(x)
)
logits_clo = F.log_softmax(outputs, dim=-1)
losses_clo = F.nll_loss(logits_clo.transpose(1,2), targets.to(outputs.device), reduction="none")
masks_clo = masks.float()
losses_clo = (losses_clo * masks_clo).sum(-1) / masks_clo.sum(-1)
nll_loss_closest = losses_clo.type_as(outputs).detach()
if masks is not None:
outputs, targets = outputs[masks], targets[masks]
if masks is not None and not masks.any():
nll_loss = torch.tensor(0)
loss = nll_loss
else:
logits = F.log_softmax(outputs, dim=-1)
if targets.dim() == 1:
losses = F.nll_loss(logits, targets.to(logits.device), reduction="none")
else: # soft-labels
losses = F.kl_div(logits, targets.to(logits.device), reduction="none")
losses = losses.sum(-1)
#nll_loss_closest = losses.float().type_as(losses).detach()
nll_loss = mean_ds(losses)
if label_smoothing > 0:
loss = (
nll_loss * (1 - label_smoothing) - mean_ds(logits) * label_smoothing
)
else:
loss = nll_loss
loss = loss * factor
return {"name": name, "loss": loss, "nll_loss": nll_loss, "factor": factor}, nll_loss_closest
def forward_encoder(self, encoder_inputs):
src_tokens, src_lengths = encoder_inputs
attn_mask = None
return self.encoder(src_tokens, src_lengths=src_lengths, attn_mask=attn_mask)
def forward(
self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, wer_dur=None, to_be_edited=None, for_wer_gather=None, closest_label=None, **kwargs
):
# encoding
# attn_mask = None
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
#print(prev_output_tokens.shape, tgt_tokens.shape)
wer_dur_pred, to_be_edited_pred, closest_pred = self.decoder.forward_wer_dur_and_tbe(
normalize=False, encoder_out=encoder_out
)
wer_dur = wer_dur.type_as(wer_dur_pred).clamp(0.0, self.werdur_max_predict) # modify wer_dur is ok because in decoder only use for gather
src_no_pad = (~(encoder_out.encoder_padding_mask))
wer_dur_pred_glo_loss = torch.Tensor([0.0])[0]
wer_dur_pred_ngram_loss = torch.Tensor([0.0])[0]
wer_dur_pred = wer_dur_pred.squeeze(-1)
wer_dur_pred_loss_float = self.werdur_loss_func(wer_dur_pred, wer_dur, reduction='none').float()
wer_dur_pred_loss = wer_dur_pred_loss_float[src_no_pad.bool()].mean().type_as(wer_dur_pred)
if self.assist_edit_loss:
if self.to_be_edited_size == 1:
if self.src_with_nbest_werdur:
to_be_edited_pred_loss_float = F.binary_cross_entropy_with_logits(to_be_edited_pred.squeeze(-2),
to_be_edited.type_as(
to_be_edited_pred),
reduction='none').float().mean(-1)
else:
to_be_edited_pred_loss_float = F.binary_cross_entropy_with_logits(to_be_edited_pred.squeeze(-1), to_be_edited.type_as(to_be_edited_pred), reduction='none').float()
to_be_edited_pred_loss = to_be_edited_pred_loss_float[src_no_pad.bool()].mean().type_as(to_be_edited_pred)
#print(to_be_edited_pred_loss)
else:
raise ValueError("Unsupported condition!")
if self.src_with_nbest_werdur and (self.closest_label_type != "all"):
batch_size = wer_dur.shape[0]
time_length = wer_dur.shape[1]
hidden_size = encoder_out.encoder_out.shape[-1]
if self.closest_label_type == "random":
ind = torch.empty(batch_size).random_(self.src_with_nbest_werdur).cuda()
else:
raise ValueError("impossible closest_label_type")
wer_dur = torch.gather(wer_dur, 2, ind[:, None, None].repeat([1, time_length, 1]).long()).squeeze(-1)
for_wer_gather = torch.gather(for_wer_gather, 2, ind[:, None, None].repeat([1, tgt_tokens.shape[1], 1]).long()).squeeze(-1)
to_be_edited = torch.gather(to_be_edited, 2, ind[:, None, None].repeat([1, time_length, 1]).long()).squeeze(-1)
encoder_out = encoder_out._replace(encoder_embedding=torch.gather(encoder_out.encoder_embedding, 2, ind[:, None, None, None].repeat([1, time_length, 1, hidden_size]).long()).squeeze(-2))
if self.closest_label_type != "all":
# decoding
word_ins_out = self.decoder(
normalize=False,
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
wer_dur=wer_dur,
to_be_edited=to_be_edited, for_wer_gather=for_wer_gather, debug_src_tokens=src_tokens, debug_tgt_tokens=tgt_tokens
)
all_word_ins_out = [word_ins_out]
else:
all_word_ins_out = []
batch_size = wer_dur.shape[0]
time_length = wer_dur.shape[1]
hidden_size = encoder_out.encoder_out.shape[-1]
for iter_beam in range(self.src_with_nbest_werdur):
ind = torch.LongTensor([iter_beam]).repeat(batch_size).cuda()
beam_wer_dur = torch.gather(wer_dur, 2, ind[:, None, None].repeat([1, time_length, 1]).long()).squeeze(-1)
beam_for_wer_gather = torch.gather(for_wer_gather, 2,
ind[:, None, None].repeat([1, tgt_tokens.shape[1], 1]).long()).squeeze(-1)
beam_to_be_edited = torch.gather(to_be_edited, 2, ind[:, None, None].repeat([1, time_length, 1]).long()).squeeze(
-1)
beam_encoder_out = encoder_out._replace(encoder_embedding=torch.gather(encoder_out.encoder_embedding, 2,
ind[:, None, None, None].repeat(
[1, time_length, 1,
hidden_size]).long()).squeeze(-2))
beam_word_ins_out = self.decoder(
normalize=False,
prev_output_tokens=prev_output_tokens,
encoder_out=beam_encoder_out,
wer_dur=beam_wer_dur,
to_be_edited=beam_to_be_edited, for_wer_gather=beam_for_wer_gather, debug_src_tokens=src_tokens,
debug_tgt_tokens=tgt_tokens
)
all_word_ins_out.append(beam_word_ins_out)
return_dict = {
"wer_dur_loss": {
"loss": wer_dur_pred_loss,
"factor": self.decoder.length_loss_factor,
},
}
closest_dloss_label = []
for iter_beam, word_ins_out in enumerate(all_word_ins_out):
if iter_beam == 0:
return_dict["word_ins"], new_closest_dloss_label = self._compute_nll_loss(
word_ins_out,
tgt_tokens,
tgt_tokens.ne(self.pad),
self.args.label_smoothing,
name="word_ins" + "-loss",
factor=1.0,
)
closest_dloss_label.append(new_closest_dloss_label[:, None])
else:
return_dict["word_ins" + str(iter_beam)], new_closest_dloss_label = self._compute_nll_loss(
word_ins_out,
tgt_tokens,
tgt_tokens.ne(self.pad),
self.args.label_smoothing,
name="word_ins" + str(iter_beam) + "-loss",
factor=1.0,
)
closest_dloss_label.append(new_closest_dloss_label[:, None])
closest_dloss_label = torch.cat(closest_dloss_label, dim=-1)
if (closest_pred is not None) and (self.closest_use_which == "default"):
assert self.src_with_nbest_werdur
closest_loss = F.binary_cross_entropy_with_logits(closest_pred, closest_label.type_as(closest_pred), reduction='none')
closest_loss = closest_loss.float().mean().type_as(closest_loss)
elif self.closest_use_which == "dloss":
assert self.src_with_nbest_werdur
closest_loss = F.mse_loss(closest_pred, closest_dloss_label.type_as(closest_pred),
reduction='none')
closest_loss = closest_loss.float().mean().type_as(closest_loss)
else:
closest_loss = torch.Tensor([0.0])[0]
if self.assist_edit_loss:
return_dict['to_be_edited_loss'] = {
"loss": to_be_edited_pred_loss,
"factor": self.decoder.length_loss_factor,
}
if self.src_with_nbest_werdur:
return_dict['closest_loss'] = {
"loss": closest_loss,
"factor": self.decoder.length_loss_factor,
}
return return_dict
def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs):
step = decoder_out.step
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
history = decoder_out.history
to_be_edited_pred = decoder_out.to_be_edited_pred
wer_dur_pred = decoder_out.wer_dur_pred
for_wer_gather = wer_dur_pred.cumsum(dim=-1)
for_wer_gather = torch.nn.functional.one_hot(for_wer_gather, num_classes=for_wer_gather.max() + 1)[:, :-1, :-1].sum(-2).cumsum(dim=-1)
# execute the decoder
output_masks = output_tokens.ne(self.pad)
_scores, _tokens = self.decoder(
normalize=True,
prev_output_tokens=output_tokens,
encoder_out=encoder_out,
step=step,
wer_dur=wer_dur_pred,
to_be_edited=to_be_edited_pred, for_wer_gather=for_wer_gather
).max(-1)
output_tokens.masked_scatter_(output_masks, _tokens[output_masks])
output_scores.masked_scatter_(output_masks, _scores[output_masks])
if history is not None:
history.append(output_tokens.clone())
return decoder_out._replace(
output_tokens=output_tokens,
output_scores=output_scores,
attn=None,
history=history,
)
def initialize_output_tokens(self, encoder_out, src_tokens, edit_thre=0.0, print_werdur=False, werdur_gt_str="", nbest_infer_type="predict"):
if getattr(self.decoder, "wer_dur_weight", None) or getattr(self.decoder, "dur_predictor", None):
if not self.src_with_nbest_werdur:
wer_dur_pred, to_be_edited_pred, closest_pred = self.decoder.forward_wer_dur_and_tbe(
normalize=False, encoder_out=encoder_out
)
if 'log' in self.werdur_loss_type:
wer_dur_pred = (torch.exp(wer_dur_pred) - 1.0).squeeze(-1).round().long().clamp_(min=0)
length_tgt = wer_dur_pred.sum(-1)
else:
wer_dur_pred = wer_dur_pred.squeeze(-1).round().long().clamp_(min=0)
length_tgt = wer_dur_pred.sum(-1)
else:
wer_dur_pred, to_be_edited_pred, closest_pred = self.decoder.forward_wer_dur_and_tbe(
normalize=False, encoder_out=encoder_out
)
batch_size = wer_dur_pred.shape[0]
time_length = wer_dur_pred.shape[1]
hidden_size = encoder_out.encoder_out.shape[-1]
if nbest_infer_type == "predict":
ind = torch.argmax(closest_pred, dim=1)
elif nbest_infer_type == "predict_min":
ind = torch.argmin(closest_pred, dim=1)
elif nbest_infer_type == "best1":
ind = closest_pred.new_zeros(batch_size).long()
elif nbest_infer_type == "best2":
ind = closest_pred.new_zeros(batch_size).long() + 1
elif nbest_infer_type == "best3":
ind = closest_pred.new_zeros(batch_size).long() + 2
elif nbest_infer_type == "best4":
ind = closest_pred.new_zeros(batch_size).long() + 3
elif nbest_infer_type == "random":
ind = closest_pred.new_zeros(batch_size).long().random_(self.src_with_nbest_werdur)
elif nbest_infer_type == "werdur_v1":
assert 'log' not in self.werdur_loss_type
ind = torch.argmin(F.mse_loss(wer_dur_pred, wer_dur_pred.round(), reduction="none").mean(1), dim=1)
elif nbest_infer_type.startswith("pdlambda"):
lambda_decoder = float(nbest_infer_type.split("_")[-1])
lambda_predictor = float(nbest_infer_type.split("_")[-2])
ind = torch.argmin(lambda_predictor * F.mse_loss(wer_dur_pred, wer_dur_pred.round(), reduction="none").mean(1) + closest_pred * lambda_decoder,
dim=1)
else:
raise ValueError("Unsupported nbest_infer_type")
wer_dur_pred = torch.gather(wer_dur_pred, 2,
ind[:, None, None].repeat([1, time_length, 1]).long())
if 'log' in self.werdur_loss_type:
wer_dur_pred = (torch.exp(wer_dur_pred) - 1.0).squeeze(-1).round().long().clamp_(min=0)
length_tgt = wer_dur_pred.sum(-1)
else:
wer_dur_pred = wer_dur_pred.squeeze(-1).round().long().clamp_(min=0)
length_tgt = wer_dur_pred.sum(-1)
encoder_out = encoder_out._replace(encoder_embedding=torch.gather(encoder_out.encoder_embedding, 2,
ind[:, None, None, None].repeat(
[1, time_length, 1,
hidden_size]).long()).squeeze(-2))
else:
# length prediction
length_tgt = self.decoder.forward_length_prediction(
self.decoder.forward_length(normalize=True, encoder_out=encoder_out),
encoder_out=encoder_out,
)
to_be_edited_pred = None
wer_dur_pred = None
max_length = length_tgt.clamp_(min=2).max()
#if len(src_tokens.shape) == 3:
# idx_length = utils.new_arange(src_tokens[:, :, 0], max_length)
#else:
idx_length = utils.new_arange(src_tokens, max_length)
initial_output_tokens = src_tokens.new_zeros(
src_tokens.size(0), max_length
).fill_(self.pad)
initial_output_tokens.masked_fill_(
idx_length[None, :] < length_tgt[:, None], self.unk
)
initial_output_tokens[:, 0] = self.bos
initial_output_tokens.scatter_(1, length_tgt[:, None] - 1, self.eos)
initial_output_scores = initial_output_tokens.new_zeros(
*initial_output_tokens.size()
).type_as(encoder_out.encoder_out)
return DecoderOut(
output_tokens=initial_output_tokens,
output_scores=initial_output_scores,
attn=None,
step=0,
max_step=0,
history=None,
to_be_edited_pred=None,
wer_dur_pred=wer_dur_pred
), encoder_out
def regenerate_length_beam(self, decoder_out, beam_size):
output_tokens = decoder_out.output_tokens
length_tgt = output_tokens.ne(self.pad).sum(1)
length_tgt = (
length_tgt[:, None]
+ utils.new_arange(length_tgt, 1, beam_size)
- beam_size // 2
)
length_tgt = length_tgt.view(-1).clamp_(min=2)
max_length = length_tgt.max()
idx_length = utils.new_arange(length_tgt, max_length)
initial_output_tokens = output_tokens.new_zeros(
length_tgt.size(0), max_length
).fill_(self.pad)
initial_output_tokens.masked_fill_(
idx_length[None, :] < length_tgt[:, None], self.unk
)
initial_output_tokens[:, 0] = self.bos
initial_output_tokens.scatter_(1, length_tgt[:, None] - 1, self.eos)
initial_output_scores = initial_output_tokens.new_zeros(
*initial_output_tokens.size()
).type_as(decoder_out.output_scores)
return decoder_out._replace(
output_tokens=initial_output_tokens, output_scores=initial_output_scores
)
class FastCorrectEncoder(TransformerEncoder):
def __init__(self, args, dictionary, embed_tokens):
super().__init__(args, dictionary, embed_tokens)
self.ensemble_models = None
embed_dim = embed_tokens.embedding_dim
self.src_with_nbest_werdur = getattr(args, "src_with_nbest_werdur", 0)
self.pos_before_reshape = getattr(args, "pos_before_reshape", False)
self.embed_positions = (
PositionalEmbedding(
args.max_source_positions,
embed_dim * self.src_with_nbest_werdur if self.pos_before_reshape else embed_dim,
self.padding_idx,
learned=args.encoder_learned_pos,
)
)
if args.src_with_nbest_werdur:
self.nbest_reshape = nn.Linear(args.src_with_nbest_werdur * embed_dim, embed_dim, bias=False)
else:
self.nbest_reshape = None
@ensemble_encoder
def forward(
self,
src_tokens,
src_lengths,
attn_mask=None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
namedtuple:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# compute padding mask
if len(src_tokens.shape) == 3:
encoder_padding_mask = src_tokens[:, :, 0].eq(self.padding_idx)
else:
encoder_padding_mask = src_tokens.eq(self.padding_idx)
encoder_states = [] if return_all_hiddens else None
# encoder layers
for layer in self.layers:
x = layer(x, encoder_padding_mask, attn_mask=attn_mask)
if return_all_hiddens:
assert encoder_states is not None
encoder_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
return EncoderOut(
encoder_out=x, # T x B x C
encoder_padding_mask=encoder_padding_mask, # B x T
encoder_embedding=encoder_embedding, # B x T x C
encoder_states=encoder_states, # List[T x B x C]
src_tokens=None,
src_lengths=None,
)
def forward_embedding(
self, src_tokens, token_embedding: Optional[torch.Tensor] = None
):
# embed tokens and positions
if token_embedding is None:
token_embedding = self.embed_tokens(src_tokens)
x = embed = self.embed_scale * token_embedding
if len(src_tokens.shape) == 2:
if self.embed_positions is not None:
x = embed + self.embed_positions(src_tokens)
else:
assert self.nbest_reshape is not None
if self.pos_before_reshape:
if self.embed_positions is not None:
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3])
x = x + self.embed_positions(src_tokens[:, :, 0])
x = self.nbest_reshape(x)
else:
x = self.nbest_reshape(x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]))
if self.embed_positions is not None:
x = x + self.embed_positions(src_tokens[:, :, 0])
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
if self.quant_noise is not None:
x = self.quant_noise(x)
return x, embed
class LayerNorm(torch.nn.LayerNorm):
"""Layer normalization module.
:param int nout: output dim size
:param int dim: dimension to be normalized
"""
def __init__(self, nout, dim=-1, eps=1e-12):
"""Construct an LayerNorm object."""
super(LayerNorm, self).__init__(nout, eps=eps)
self.dim = dim
def forward(self, x):
"""Apply layer normalization.
:param torch.Tensor x: input tensor
:return: layer normalized tensor
:rtype torch.Tensor
"""
if self.dim == -1:
return super(LayerNorm, self).forward(x)
return super(LayerNorm, self).forward(x.transpose(1, -1)).transpose(1, -1)
class DurationPredictor(torch.nn.Module):
def __init__(self, idim, n_layers=2, n_chans=384, kernel_size=3, dropout_rate=0.1, ffn_layers=1, offset=1.0, ln_eps=1e-12, remove_edit_emb=False, to_be_edited_size=1, add_glo_biclass=False, padding='SAME'):
"""Initilize duration predictor module.
Args:
idim (int): Input dimension.
n_layers (int, optional): Number of convolutional layers.
n_chans (int, optional): Number of channels of convolutional layers.
kernel_size (int, optional): Kernel size of convolutional layers.
dropout_rate (float, optional): Dropout rate.
offset (float, optional): Offset value to avoid nan in log domain.
"""
super(DurationPredictor, self).__init__()
#'''
self.offset = offset
self.conv = torch.nn.ModuleList()
self.kernel_size = kernel_size
self.padding = padding
self.remove_edit_emb = remove_edit_emb
self.add_glo_biclass = add_glo_biclass
for idx in range(n_layers):
in_chans = idim if idx == 0 else n_chans
self.conv += [torch.nn.Sequential(
torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=0),
torch.nn.ReLU(),
LayerNorm(n_chans, dim=1, eps=ln_eps),
FairseqDropout(dropout_rate, module_name="DP_dropout")
)]
if ffn_layers == 1:
self.werdur_linear = torch.nn.Linear(n_chans, 1)
if self.add_glo_biclass:
self.glo_biclass_linear = torch.nn.Linear(n_chans, 1)
self.edit_linear = torch.nn.Linear(n_chans, to_be_edited_size)
else:
assert ffn_layers == 2
self.werdur_linear = torch.nn.Sequential(
torch.nn.Linear(n_chans, n_chans // 2),
torch.nn.ReLU(),
FairseqDropout(dropout_rate, module_name="DP_dropout"),
torch.nn.Linear(n_chans // 2, 1),
)
self.edit_linear = torch.nn.Sequential(
torch.nn.Linear(n_chans, n_chans // 2),
torch.nn.ReLU(),
FairseqDropout(dropout_rate, module_name="DP_dropout"),
torch.nn.Linear(n_chans // 2, to_be_edited_size),
)
if self.add_glo_biclass:
self.glo_biclass_linear = torch.nn.Sequential(
torch.nn.Linear(n_chans, n_chans // 2),
torch.nn.ReLU(),
FairseqDropout(dropout_rate, module_name="DP_dropout"),
torch.nn.Linear(n_chans // 2, 1),
)
#'''
#self.werdur_linear = torch.nn.Linear(idim, 1)
#self.edit_linear = torch.nn.Linear(idim, 1)
def forward(self, xs, x_nonpadding=None):
#'''
xs = xs.transpose(1, -1) # (B, idim, Tmax)
for f in self.conv:
if self.padding == 'SAME':
xs = F.pad(xs, [self.kernel_size // 2, self.kernel_size // 2])
elif self.padding == 'LEFT':
xs = F.pad(xs, [self.kernel_size - 1, 0])
xs = f(xs) # (B, C, Tmax)
if x_nonpadding is not None:
xs = xs * x_nonpadding[:, None, :]
xs = xs.transpose(1, -1)
#'''
werdur = self.werdur_linear(xs) * x_nonpadding[:, :, None] # (B, Tmax)
to_be_edited = self.edit_linear(xs) * x_nonpadding[:, :, None] # (B, Tmax)
if self.add_glo_biclass:
return werdur, to_be_edited, self.glo_biclass_linear(xs.sum(1) / x_nonpadding.sum(1, keepdim=True))
else:
return werdur, to_be_edited
class FastCorrectDecoder(FairseqNATDecoder):
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
super().__init__(
args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn
)
self.dictionary = dictionary
self.bos = dictionary.bos()
self.unk = dictionary.unk()
self.eos = dictionary.eos()
# try:
# self.mask = dictionary.mask()
# except:
# print("<mask> not found in dictionary!")
# self.mask = None
self.encoder_embed_dim = args.encoder_embed_dim
self.sg_length_pred = getattr(args, "sg_length_pred", False)
self.length_loss_factor = getattr(args, "length_loss_factor", 0.1)
self.to_be_edited_size = getattr(args, "to_be_edited_size", 1)
self.edit_emb_dim = getattr(args, "edit_emb_dim", self.encoder_embed_dim // 2)
self.src_with_nbest_werdur = getattr(args, "src_with_nbest_werdur", 0)
embed_dim = args.decoder_embed_dim
if args.src_with_nbest_werdur:
self.nbest_eout_reshape = torch.nn.Linear(self.encoder_embed_dim * 2, self.encoder_embed_dim,
bias=False)
if getattr(args, "dur_predictor_type", "") == 'v2':
self.dur_predictor = DurationPredictor(idim=self.encoder_embed_dim, n_layers=5, n_chans=self.encoder_embed_dim, ffn_layers=2, ln_eps=1e-5, remove_edit_emb=False, to_be_edited_size=self.to_be_edited_size, add_glo_biclass=bool(self.src_with_nbest_werdur))
assert not getattr(args, "use_wer_dur", False)
else:
raise ValueError("Other type is undefined")
self.pos_before_reshape = getattr(args, "pos_before_reshape", False)
'''
self.embed_positions = (
PositionalEmbedding(
args.max_target_positions,
embed_dim * self.src_with_nbest_werdur if self.pos_before_reshape else embed_dim,
self.padding_idx,
learned=args.decoder_learned_pos,
)
)
'''
@ensemble_decoder
def forward(self, normalize, encoder_out, prev_output_tokens, step=0, wer_dur=None, to_be_edited=None, for_wer_gather=None, debug_src_tokens=None, debug_tgt_tokens=None, **unused):
features, _ = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
wer_dur=wer_dur,
to_be_edited=to_be_edited, for_wer_gather=for_wer_gather, debug_src_tokens=debug_src_tokens, debug_tgt_tokens=debug_tgt_tokens
)
decoder_out = self.output_layer(features)
return F.log_softmax(decoder_out, -1) if normalize else decoder_out
@ensemble_decoder
def forward_length(self, normalize, encoder_out):
enc_feats = encoder_out.encoder_out # T x B x C
src_masks = encoder_out.encoder_padding_mask # B x T or None
enc_feats = _mean_pooling(enc_feats, src_masks)
if self.sg_length_pred:
enc_feats = enc_feats.detach()
length_out = F.linear(enc_feats, self.embed_length.weight)
return F.log_softmax(length_out, -1) if normalize else length_out
@ensemble_decoder
def forward_wer_dur_and_tbe(self, normalize, encoder_out):
enc_feats = encoder_out.encoder_out # T x B x C
src_masks = encoder_out.encoder_padding_mask # B x T or None
encoder_embedding = encoder_out.encoder_embedding # B x T x C or B, T, nbest, C
enc_feats = enc_feats.transpose(0, 1)
# enc_feats = _mean_pooling(enc_feats, src_masks)
if self.sg_length_pred:
enc_feats = enc_feats.detach()
src_masks = (~src_masks)
if len(encoder_embedding.shape) == 3:
if self.dur_predictor is not None:
wer_dur_out, to_be_edited_out = self.dur_predictor(enc_feats, src_masks)
else:
wer_dur_out = F.linear(enc_feats, self.wer_dur_weight.weight) * src_masks[:, :, None]
to_be_edited_out = F.linear(enc_feats, self.edit_weight.weight) * src_masks[:, :, None]
closest = None
else:
wer_dur_list = []
to_be_edited_list = []
closest_list = []
for i in range(self.src_with_nbest_werdur):
new_enc_feats = self.nbest_eout_reshape(torch.cat([enc_feats, encoder_embedding[:, :, i, :]], dim=-1))
if self.dur_predictor is not None:
new_wer_dur, new_to_be_edited, new_closest = self.dur_predictor(new_enc_feats, src_masks)
else:
raise ValueError("Only duration predictor is suppored!")
new_wer_dur = F.linear(enc_feats, self.wer_dur_weight.weight) * src_masks[:, :, None]
new_to_be_edited = F.linear(enc_feats, self.edit_weight.weight) * src_masks[:, :, None]
wer_dur_list.append(new_wer_dur)
to_be_edited_list.append(new_to_be_edited[:,:,:,None])
closest_list.append(new_closest)
wer_dur_out = torch.cat(wer_dur_list, dim=-1)
to_be_edited_out = torch.cat(to_be_edited_list, dim=-1)
closest = torch.cat(closest_list, dim=-1)
# return (wer_dur_out), (F.log_softmax(to_be_edited_out, -1) if normalize else to_be_edited_out)
return wer_dur_out, to_be_edited_out, closest
def extract_features(
self,
prev_output_tokens,
encoder_out=None,
early_exit=None,
wer_dur=None,
to_be_edited=None, for_wer_gather=None, debug_src_tokens=None, debug_tgt_tokens=None,
**unused
):
"""
Similar to *forward* but only return features.
Inputs:
prev_output_tokens: Tensor(B, T)
encoder_out: a dictionary of hidden states and masks
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
the LevenshteinTransformer decoder has full-attention to all generated tokens
"""
# embedding
src_embd = encoder_out.encoder_embedding
src_mask = encoder_out.encoder_padding_mask
src_mask = (
~src_mask
if src_mask is not None
else prev_output_tokens.new_ones(*src_embd.size()[:2]).bool()
)
x, decoder_padding_mask = self.forward_embedding(
prev_output_tokens,
self.forward_wer_dur_embedding(
src_embd, src_mask, prev_output_tokens.ne(self.padding_idx), wer_dur, to_be_edited, for_wer_gather, debug_src_tokens=debug_src_tokens, debug_tgt_tokens=debug_tgt_tokens
),
)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
attn = None
inner_states = [x]
# decoder layers
for i, layer in enumerate(self.layers):
# early exit from the decoder.
if (early_exit is not None) and (i >= early_exit):
break
x, attn, _ = layer(
x,
encoder_out.encoder_out if encoder_out is not None else None,
encoder_out.encoder_padding_mask if encoder_out is not None else None,
self_attn_mask=None,
self_attn_padding_mask=decoder_padding_mask,
)
inner_states.append(x)
if self.layer_norm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {"attn": attn, "inner_states": inner_states}
def forward_embedding(self, prev_output_tokens, states=None):
# embed positions
positions = (
self.embed_positions(prev_output_tokens)
if self.embed_positions is not None
else None
)
# embed tokens and positions
if states is None:
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
else:
x = states
if positions is not None:
x += positions
x = self.dropout_module(x)
decoder_padding_mask = prev_output_tokens.eq(self.padding_idx)
return x, decoder_padding_mask
def forward_wer_dur_embedding(self, src_embeds, src_masks, tgt_masks, wer_dur, to_be_edited, for_wer_gather=None, debug_src_tokens=None, debug_tgt_tokens=None):
# src_embeds: [B, T, C] * [s_T, t_T]
length_sources = src_masks.sum(1)
length_targets = tgt_masks.sum(1)
batch_size, _, hidden_size = src_embeds.shape
max_time = tgt_masks.shape[1]
for_wer_gather = for_wer_gather[:, :, None].long()
to_reshape = torch.gather(src_embeds, 1, for_wer_gather.repeat(1, 1, src_embeds.shape[2]))
to_reshape = to_reshape * tgt_masks[:, :, None]
return to_reshape
def forward_length_prediction(self, length_out, encoder_out, tgt_tokens=None):
enc_feats = encoder_out.encoder_out # T x B x C
src_masks = encoder_out.encoder_padding_mask # B x T or None
if self.pred_length_offset:
if src_masks is None:
src_lengs = enc_feats.new_ones(enc_feats.size(1)).fill_(
enc_feats.size(0)
)
else:
src_lengs = (~src_masks).transpose(0, 1).type_as(enc_feats).sum(0)
src_lengs = src_lengs.long()
if tgt_tokens is not None:
# obtain the length target
tgt_lengs = tgt_tokens.ne(self.padding_idx).sum(1).long()
if self.pred_length_offset:
length_tgt = tgt_lengs - src_lengs + 128
else:
length_tgt = tgt_lengs
length_tgt = length_tgt.clamp(min=0, max=255)
else:
# predict the length target (greedy for now)
# TODO: implementing length-beam
pred_lengs = length_out.max(-1)[1]
if self.pred_length_offset:
length_tgt = pred_lengs - 128 + src_lengs
else:
length_tgt = pred_lengs
return length_tgt
@register_model_architecture(
"fastcorrect", "fastcorrect"
)
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.apply_bert_init = getattr(args, "apply_bert_init", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
# --- special arguments ---
args.sg_length_pred = getattr(args, "sg_length_pred", False)
args.pred_length_offset = getattr(args, "pred_length_offset", False)
args.length_loss_factor = getattr(args, "length_loss_factor", 0.1)
args.src_embedding_copy = getattr(args, "src_embedding_copy", False)
| 47,075 | 40.957219 | 265 | py |
NeuralSpeech | NeuralSpeech-master/CMatchASR/distances.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
def CORAL(source, target):
DEVICE = source.device
d = source.size(1)
ns, nt = source.size(0), target.size(0)
# source covariance
tmp_s = torch.ones((1, ns)).to(DEVICE) @ source
cs = (source.t() @ source - (tmp_s.t() @ tmp_s) / ns) / (ns - 1)
# target covariance
tmp_t = torch.ones((1, nt)).to(DEVICE) @ target
ct = (target.t() @ target - (tmp_t.t() @ tmp_t) / nt) / (nt - 1)
# frobenius norm
loss = (cs - ct).pow(2).sum().sqrt()
loss = loss / (4 * d * d)
return loss
class MMD_loss(torch.nn.Module):
def __init__(self, kernel_type='rbf', kernel_mul=2.0, kernel_num=5):
super(MMD_loss, self).__init__()
self.kernel_num = kernel_num
self.kernel_mul = kernel_mul
self.fix_sigma = None
self.kernel_type = kernel_type
def guassian_kernel(self, source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
n_samples = int(source.size()[0]) + int(target.size()[0])
total = torch.cat([source, target], dim=0)
total0 = total.unsqueeze(0).expand(
int(total.size(0)), int(total.size(0)), int(total.size(1)))
total1 = total.unsqueeze(1).expand(
int(total.size(0)), int(total.size(0)), int(total.size(1)))
L2_distance = ((total0-total1)**2).sum(2)
if fix_sigma:
bandwidth = fix_sigma
else:
bandwidth = torch.sum(L2_distance.data) / (n_samples**2-n_samples)
bandwidth /= kernel_mul ** (kernel_num // 2)
bandwidth_list = [bandwidth * (kernel_mul**i)
for i in range(kernel_num)]
kernel_val = [torch.exp(-L2_distance / bandwidth_temp)
for bandwidth_temp in bandwidth_list]
return sum(kernel_val)
def linear_mmd2(self, f_of_X, f_of_Y):
loss = 0.0
delta = f_of_X.float().mean(0) - f_of_Y.float().mean(0)
loss = delta.dot(delta.T)
return loss
def forward(self, source, target):
if self.kernel_type == 'linear':
return self.linear_mmd2(source, target)
elif self.kernel_type == 'rbf':
batch_size = int(source.size()[0])
kernels = self.guassian_kernel(
source, target, kernel_mul=self.kernel_mul, kernel_num=self.kernel_num, fix_sigma=self.fix_sigma)
XX = torch.mean(kernels[:batch_size, :batch_size])
YY = torch.mean(kernels[batch_size:, batch_size:])
XY = torch.mean(kernels[:batch_size, batch_size:])
YX = torch.mean(kernels[batch_size:, :batch_size])
loss = torch.mean(XX + YY - XY - YX)
return loss | 2,742 | 38.185714 | 113 | py |
NeuralSpeech | NeuralSpeech-master/CMatchASR/utils.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import logging
from espnet.asr.asr_utils import add_results_to_json
import argparse
import numpy as np
import collections
import json
def str2bool(str):
return True if str.lower() == 'true' else False
def setup_logging(verbose=1):
if verbose > 0:
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
else:
logging.basicConfig(
level=logging.WARN,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
logging.warning("Skip DEBUG/INFO messages")
# Training stats
def dict_average(dic):
avg_key, avg_val = [], []
for key, lst in dic.items():
if key.endswith("_lst"):
avg_key.append(key[:-4])
avg_val.append(np.mean(lst))
for key, val in zip(avg_key, avg_val):
dic[key] = val
return dic
# Load and save
def load_pretrained_model(model, model_path, modules_to_load=None, exclude_modules=None):
'''
load_pretrained_model(model=model, model_path="",
modules_to_load=None, exclude_modules="")
'''
model_dict = torch.load(model_path, map_location=lambda storage, loc: storage)
if exclude_modules:
for e in exclude_modules.split(","):
model_dict = {k: v for k, v in model_dict.items() if not k.startswith(e)}
if not modules_to_load:
src_dict = model_dict
else:
src_dict = {}
for module in modules_to_load.split(","):
src_dict.update({k: v for k, v in model_dict.items() if k.startswith(module)})
dst_state = model.state_dict()
dst_state.update(src_dict)
model.load_state_dict(dst_state)
def torch_save(model, save_path, optimizer=None, local_rank=0):
if local_rank != 0:
return
if hasattr(model, "module"):
state_dict = model.module.state_dict() if not optimizer else collections.OrderedDict(model=model.module.state_dict(), optimizer=optimizer.state_dict())
else:
state_dict = model.state_dict() if not optimizer else collections.OrderedDict(model=model.state_dict(), optimizer=optimizer.state_dict())
torch.save(state_dict, save_path)
def torch_load(snapshot_path, model, optimizer=None):
# load snapshot
snapshot_dict = torch.load(snapshot_path, map_location=lambda storage, loc: storage)
if not "model" in snapshot_dict.keys():
model_dict = snapshot_dict
snapshot_dict = collections.OrderedDict(model=model_dict)
if hasattr(model, "module"):
model.module.load_state_dict(snapshot_dict["model"])
else:
model.load_state_dict(snapshot_dict["model"])
if optimizer:
optimizer.load_state_dict(snapshot_dict["optimizer"])
del snapshot_dict
# Decoding
def compute_wer(ref, hyp, normalize=False):
"""Compute Word Error Rate.
[Reference]
https://martin-thoma.com/word-error-rate-calculation/
Args:
ref (list): words in the reference transcript
hyp (list): words in the predicted transcript
normalize (bool, optional): if True, divide by the length of ref
Returns:
wer (float): Word Error Rate between ref and hyp
n_sub (int): the number of substitution
n_ins (int): the number of insertion
n_del (int): the number of deletion
"""
# Initialisation
d = np.zeros((len(ref) + 1) * (len(hyp) + 1), dtype=np.uint16)
d = d.reshape((len(ref) + 1, len(hyp) + 1))
for i in range(len(ref) + 1):
for j in range(len(hyp) + 1):
if i == 0:
d[0][j] = j
elif j == 0:
d[i][0] = i
# Computation
for i in range(1, len(ref) + 1):
for j in range(1, len(hyp) + 1):
if ref[i - 1] == hyp[j - 1]:
d[i][j] = d[i - 1][j - 1]
else:
sub_tmp = d[i - 1][j - 1] + 1
ins_tmp = d[i][j - 1] + 1
del_tmp = d[i - 1][j] + 1
d[i][j] = min(sub_tmp, ins_tmp, del_tmp)
wer = d[len(ref)][len(hyp)]
# Find out the manipulation steps
x = len(ref)
y = len(hyp)
error_list = []
while True:
if x == 0 and y == 0:
break
else:
if x > 0 and y > 0:
if d[x][y] == d[x - 1][y - 1] and ref[x - 1] == hyp[y - 1]:
error_list.append("C")
x = x - 1
y = y - 1
elif d[x][y] == d[x][y - 1] + 1:
error_list.append("I")
y = y - 1
elif d[x][y] == d[x - 1][y - 1] + 1:
error_list.append("S")
x = x - 1
y = y - 1
else:
error_list.append("D")
x = x - 1
elif x == 0 and y > 0:
if d[x][y] == d[x][y - 1] + 1:
error_list.append("I")
y = y - 1
else:
error_list.append("D")
x = x - 1
elif y == 0 and x > 0:
error_list.append("D")
x = x - 1
else:
raise ValueError
n_sub = error_list.count("S")
n_ins = error_list.count("I")
n_del = error_list.count("D")
n_cor = error_list.count("C")
assert wer == (n_sub + n_ins + n_del)
assert n_cor == (len(ref) - n_sub - n_del)
if normalize:
wer /= len(ref)
return wer, n_sub, n_ins, n_del, n_cor
def recognize_and_evaluate(dataloader, model, args, model_path=None, wer=False, write_to_json=False):
if model_path:
torch_load(model_path, model)
orig_model = model
if hasattr(model, "module"):
model = model.module
if write_to_json:
# read json data
assert args.result_label and args.recog_json
with open(args.recog_json, "rb") as f:
js = json.load(f)["utts"]
new_js = {}
model.eval()
recog_args = {
"beam_size": args.beam_size,
"penalty": args.penalty,
"ctc_weight": args.ctc_weight,
"maxlenratio": args.maxlenratio,
"minlenratio": args.minlenratio,
"lm_weight": args.lm_weight,
"rnnlm": args.rnnlm,
"nbest": args.nbest,
"space": args.sym_space,
"blank": args.sym_blank,
}
recog_args = argparse.Namespace(**recog_args)
err_dict = (
dict(cer=None)
if not wer
else dict(cer=collections.defaultdict(int), wer=collections.defaultdict(int))
)
with torch.no_grad():
for batch_idx, data in enumerate(dataloader):
logging.warning(f"Testing CER/WERs: {batch_idx+1}/{len(dataloader)}")
fbank, ilens, tokens = data
fbanks = []
for i, fb in enumerate(fbank):
fbanks.append(fb[: ilens[i], :])
fbank = fbanks
nbest_hyps = model.recognize_batch(
fbank, recog_args, char_list=None, rnnlm=None
)
y_hats = [nbest_hyp[0]["yseq"][1:-1] for nbest_hyp in nbest_hyps]
if write_to_json:
for utt_idx in range(len(fbank)):
name = dataloader.dataset[batch_idx][utt_idx][0]
new_js[name] = add_results_to_json(
js[name], nbest_hyps[utt_idx], args.char_list
)
for i, y_hat in enumerate(y_hats):
y_true = tokens[i]
hyp_token = [
args.char_list[int(idx)] for idx in y_hat if int(idx) != -1
]
ref_token = [
args.char_list[int(idx)] for idx in y_true if int(idx) != -1
]
for key in sorted(err_dict.keys()): # cer then wer
if key == "wer":
if args.bpemodel:
ref_token = args.bpemodel.decode_pieces(ref_token).split()
hyp_token = args.bpemodel.decode_pieces(hyp_token).split()
else:
ref_token = (
" ".join(ref_token)
.replace(" ", "")
.replace("<space>", " ")
.split()
) # sclite does not consider the number of spaces when splitting
hyp_token = (
" ".join(hyp_token)
.replace(" ", "")
.replace("<space>", " ")
.split()
)
logging.debug("HYP: " + str(hyp_token))
logging.debug("REF: " + str(ref_token))
utt_err, utt_nsub, utt_nins, utt_ndel, utt_ncor = compute_wer(
ref_token, hyp_token
)
err_dict[key]["n_word"] += len(ref_token)
if utt_err != 0:
err_dict[key]["n_err"] += utt_err # Char / word error
err_dict[key]["n_ser"] += 1 # Sentence error
err_dict[key]["n_cor"] += utt_ncor
err_dict[key]["n_sub"] += utt_nsub
err_dict[key]["n_ins"] += utt_nins
err_dict[key]["n_del"] += utt_ndel
err_dict[key]["n_sent"] += 1
for key in err_dict.keys():
err_dict[key]["err"] = err_dict[key]["n_err"] / err_dict[key]["n_word"] * 100.0
err_dict[key]["ser"] = err_dict[key]["n_ser"] / err_dict[key]["n_word"] * 100.0
torch.cuda.empty_cache()
if write_to_json:
with open(args.result_label, "wb") as f:
f.write(
json.dumps(
{"utts": new_js}, indent=4, ensure_ascii=False, sort_keys=True
).encode("utf_8")
)
model = orig_model
return err_dict | 10,200 | 36.503676 | 159 | py |
NeuralSpeech | NeuralSpeech-master/CMatchASR/ctc_aligner.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright 2019 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
# https://github.com/hirofumi0810/neural_sp
import torch
import numpy as np
LOG_0 = -1e10
LOG_1 = 0
def np2tensor(array, device=None):
"""Convert form np.ndarray to torch.Tensor.
Args:
array (np.ndarray): A tensor of any sizes
Returns:
tensor (torch.Tensor):
"""
tensor = torch.from_numpy(array).to(device)
return tensor
def pad_list(xs, pad_value=0., pad_left=False):
"""Convert list of Tensors to a single Tensor with padding.
Args:
xs (list): A list of length `[B]`, which contains Tensors of size `[T, input_size]`
pad_value (float):
pad_left (bool):
Returns:
xs_pad (FloatTensor): `[B, T, input_size]`
"""
bs = len(xs)
max_time = max(x.size(0) for x in xs)
xs_pad = xs[0].new_zeros(bs, max_time, * xs[0].size()[1:]).fill_(pad_value)
for b in range(bs):
if len(xs[b]) == 0:
continue
if pad_left:
xs_pad[b, -xs[b].size(0):] = xs[b]
else:
xs_pad[b, :xs[b].size(0)] = xs[b]
return xs_pad
class CTCForcedAligner(object):
def __init__(self, blank=0, char_list=None):
self.blank = blank
self.symbols = [0, 1, 2, 29, 30]
self.char_list = char_list
def __call__(self, logits, elens, ys):
"""Forced alignment with references.
Args:
logits (FloatTensor): `[B, T, vocab]`
elens (List): length `[B]`
ys (List): length `[B]`, each of which contains a list of size `[L]`
ylens (List): length `[B]`
Returns:
trigger_points (IntTensor): `[B, L]`
"""
ylens = np2tensor(np.fromiter([len(y) for y in ys], dtype=np.int32))
with torch.no_grad():
ys = [np2tensor(np.fromiter(y, dtype=np.int64), logits.device) for y in ys]
ys_in_pad = pad_list(ys, 0)
# zero padding
mask = make_pad_mask(elens.to(logits.device))
mask = mask.unsqueeze(2).expand_as(logits)
logits = logits.masked_fill_(mask == 0, LOG_0)
log_probs = torch.log_softmax(logits, dim=-1).transpose(0, 1) # `[T, B, vocab]`
trigger_points = self.align(log_probs, elens, ys_in_pad, ylens)
return trigger_points
def align(self, log_probs, elens, ys, ylens, add_eos=True):
"""Calculte the best CTC alignment with the forward-backward algorithm.
Args:
log_probs (FloatTensor): `[T, B, vocab]`
elens (FloatTensor): `[B]`
ys (FloatTensor): `[B, L]`
ylens (FloatTensor): `[B]`
add_eos (bool): Use the last time index as a boundary corresponding to <eos>
Returns:
trigger_points (IntTensor): `[B, L]`
"""
xmax, bs, vocab = log_probs.size()
path = _label_to_path(ys, self.blank)
path_lens = 2 * ylens.long() + 1
ymax = ys.size(1)
max_path_len = path.size(1)
assert ys.size() == (bs, ymax), ys.size()
assert path.size() == (bs, ymax * 2 + 1)
alpha = log_probs.new_zeros(bs, max_path_len).fill_(LOG_0)
alpha[:, 0] = LOG_1
beta = alpha.clone()
gamma = alpha.clone()
batch_index = torch.arange(bs, dtype=torch.int64).unsqueeze(1)
frame_index = torch.arange(xmax, dtype=torch.int64).unsqueeze(1).unsqueeze(2)
log_probs_fwd_bwd = log_probs[frame_index, batch_index, path]
same_transition = (path[:, :-2] == path[:, 2:])
outside = torch.arange(max_path_len, dtype=torch.int64) >= path_lens.unsqueeze(1)
log_probs_gold = log_probs[:, batch_index, path]
# forward algorithm
for t in range(xmax):
alpha = _computes_transition(alpha, same_transition, outside,
log_probs_fwd_bwd[t], log_probs_gold[t])
# backward algorithm
r_path = _flip_path(path, path_lens)
log_probs_inv = _flip_label_probability(log_probs, elens.long()) # `[T, B, vocab]`
log_probs_fwd_bwd = _flip_path_probability(log_probs_fwd_bwd, elens.long(), path_lens) # `[T, B, 2*L+1]`
r_same_transition = (r_path[:, :-2] == r_path[:, 2:])
log_probs_inv_gold = log_probs_inv[:, batch_index, r_path]
for t in range(xmax):
beta = _computes_transition(beta, r_same_transition, outside,
log_probs_fwd_bwd[t], log_probs_inv_gold[t])
# pick up the best CTC path
best_aligns = log_probs.new_zeros((bs, xmax), dtype=torch.int64)
# forward algorithm
log_probs_fwd_bwd = _flip_path_probability(log_probs_fwd_bwd, elens.long(), path_lens)
for t in range(xmax):
gamma = _computes_transition(gamma, same_transition, outside,
log_probs_fwd_bwd[t], log_probs_gold[t],
skip_accum=True)
# select paths where gamma is valid
log_probs_fwd_bwd[t] = log_probs_fwd_bwd[t].masked_fill_(gamma == LOG_0, LOG_0)
# pick up the best alignment
offsets = log_probs_fwd_bwd[t].argmax(1)
for b in range(bs):
if t <= elens[b] - 1:
token_idx = path[b, offsets[b]]
best_aligns[b, t] = token_idx
# remove the rest of paths
gamma = log_probs.new_zeros(bs, max_path_len).fill_(LOG_0)
for b in range(bs):
gamma[b, offsets[b]] = LOG_1
# pick up trigger points
trigger_aligns = torch.zeros((bs, xmax), dtype=torch.int64)
trigger_aligns_avg = torch.zeros((bs, xmax), dtype=torch.int64)
trigger_points = log_probs.new_zeros((bs, ymax + 1), dtype=torch.int32) # +1 for <eos>
for b in range(bs):
n_triggers = 0
if add_eos:
trigger_points[b, ylens[b]] = elens[b] - 1
# NOTE: use the last time index as a boundary corresponding to <eos>
# Otherwise, index: 0 is used for <eos>
last_token_idx = None
count = 0
for t in range(elens[b]):
token_idx = best_aligns[b, t]
if token_idx in self.symbols:
trigger_aligns_avg[b, t] = last_token_idx if last_token_idx else 0
count += 1
if token_idx == self.blank:
continue
if not (t == 0 or token_idx != best_aligns[b, t - 1]):
continue
# NOTE: select the most left trigger points
trigger_aligns[b, t] = token_idx
last_token_idx = token_idx
trigger_points[b, n_triggers] = t
n_triggers += 1
assert ylens.sum() == (trigger_aligns != 0).sum()
return trigger_aligns_avg
def _flip_label_probability(log_probs, xlens):
"""Flips a label probability matrix.
This function rotates a label probability matrix and flips it.
``log_probs[i, b, l]`` stores log probability of label ``l`` at ``i``-th
input in ``b``-th batch.
The rotated matrix ``r`` is defined as
``r[i, b, l] = log_probs[i + xlens[b], b, l]``
Args:
cum_log_prob (FloatTensor): `[T, B, vocab]`
xlens (LongTensor): `[B]`
Returns:
FloatTensor: `[T, B, vocab]`
"""
xmax, bs, vocab = log_probs.size()
rotate = (torch.arange(xmax, dtype=torch.int64)[:, None] + xlens) % xmax
return torch.flip(log_probs[rotate[:, :, None],
torch.arange(bs, dtype=torch.int64)[None, :, None],
torch.arange(vocab, dtype=torch.int64)[None, None, :]], dims=[0])
def _flip_path_probability(cum_log_prob, xlens, path_lens):
"""Flips a path probability matrix.
This function returns a path probability matrix and flips it.
``cum_log_prob[i, b, t]`` stores log probability at ``i``-th input and
at time ``t`` in a output sequence in ``b``-th batch.
The rotated matrix ``r`` is defined as
``r[i, j, k] = cum_log_prob[i + xlens[j], j, k + path_lens[j]]``
Args:
cum_log_prob (FloatTensor): `[T, B, 2*L+1]`
xlens (LongTensor): `[B]`
path_lens (LongTensor): `[B]`
Returns:
FloatTensor: `[T, B, 2*L+1]`
"""
xmax, bs, max_path_len = cum_log_prob.size()
rotate_input = ((torch.arange(xmax, dtype=torch.int64)[:, None] + xlens) % xmax)
rotate_label = ((torch.arange(max_path_len, dtype=torch.int64) + path_lens[:, None]) % max_path_len)
return torch.flip(cum_log_prob[rotate_input[:, :, None],
torch.arange(bs, dtype=torch.int64)[None, :, None],
rotate_label], dims=[0, 2])
def _computes_transition(seq_log_prob, same_transition, outside,
cum_log_prob, log_prob_yt, skip_accum=False):
bs, max_path_len = seq_log_prob.size()
mat = seq_log_prob.new_zeros(3, bs, max_path_len).fill_(LOG_0)
mat[0, :, :] = seq_log_prob
mat[1, :, 1:] = seq_log_prob[:, :-1]
mat[2, :, 2:] = seq_log_prob[:, :-2]
# disable transition between the same symbols
# (including blank-to-blank)
mat[2, :, 2:][same_transition] = LOG_0
seq_log_prob = torch.logsumexp(mat, dim=0) # overwrite
seq_log_prob[outside] = LOG_0
if not skip_accum:
cum_log_prob += seq_log_prob
seq_log_prob += log_prob_yt
return seq_log_prob
def make_pad_mask(seq_lens):
"""Make mask for padding.
Args:
seq_lens (IntTensor): `[B]`
Returns:
mask (IntTensor): `[B, T]`
"""
bs = seq_lens.size(0)
max_time = seq_lens.max()
seq_range = torch.arange(0, max_time, dtype=torch.int32, device=seq_lens.device)
seq_range = seq_range.unsqueeze(0).expand(bs, max_time)
mask = seq_range < seq_lens.unsqueeze(-1)
return mask
def _label_to_path(labels, blank):
path = labels.new_zeros(labels.size(0), labels.size(1) * 2 + 1).fill_(blank).long()
path[:, 1::2] = labels
return path
def _flip_path(path, path_lens):
"""Flips label sequence.
This function rotates a label sequence and flips it.
``path[b, t]`` stores a label at time ``t`` in ``b``-th batch.
The rotated matrix ``r`` is defined as
``r[b, t] = path[b, t + path_lens[b]]``
.. ::
a b c d . . a b c d d c b a .
e f . . . -> . . . e f -> f e . . .
g h i j k g h i j k k j i h g
Args:
path (FloatTensor): `[B, 2*L+1]`
path_lens (LongTensor): `[B]`
Returns:
FloatTensor: `[B, 2*L+1]`
"""
bs = path.size(0)
max_path_len = path.size(1)
rotate = (torch.arange(max_path_len) + path_lens[:, None]) % max_path_len
return torch.flip(path[torch.arange(bs, dtype=torch.int64)[:, None], rotate], dims=[1]) | 11,085 | 39.312727 | 113 | py |
NeuralSpeech | NeuralSpeech-master/CMatchASR/e2e_asr_udatransformer.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import collections
from espnet.nets.pytorch_backend.e2e_asr_transformer import *
from espnet.nets.pytorch_backend.e2e_asr_transformer import E2E as SpeechTransformer
from espnet.nets.pytorch_backend.transformer.encoder import *
from espnet.nets.pytorch_backend.transformer.decoder import *
from espnet.nets.pytorch_backend.transformer.encoder_layer import EncoderLayer
from espnet.nets.pytorch_backend.transformer.decoder_layer import DecoderLayer
import torch
from distances import CORAL, MMD_loss
import numpy as np
from ctc_aligner import CTCForcedAligner
def adapt_loss(source, target, adapt_loss="mmd"):
if adapt_loss == "mmd": # 1.0 level
mmd_loss = MMD_loss()
loss = mmd_loss(source, target)
elif adapt_loss == "mmd_linear":
mmd_loss = MMD_loss(kernel_type="linear")
loss = mmd_loss(source, target)
elif adapt_loss == "coral": # 1e-4 level
loss = CORAL(source, target)
else:
raise NotImplementedError(f"Adapt loss type {adapt_loss} is not implemented")
return loss
class Discriminator(torch.nn.Module):
def __init__(self, input_dim=256, hidden_dim=256):
super(Discriminator, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dis1 = torch.nn.Linear(input_dim, hidden_dim)
self.bn = torch.nn.BatchNorm1d(hidden_dim)
self.dis2 = torch.nn.Linear(hidden_dim, 1)
def forward(self, x):
x = torch.nn.functional.relu(self.dis1(x))
x = self.dis2(self.bn(x.permute(0, 2, 1)).permute(0, 2, 1))
x = torch.sigmoid(x)
return x
class ReverseLayerF(torch.autograd.Function):
@staticmethod
def forward(ctx, x, alpha):
ctx.alpha = alpha
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
output = grad_output.neg() * ctx.alpha
return output, None
class CustomEncoderLayer(EncoderLayer):
def forward(self, x, mask, cache=None):
residual = x
if self.normalize_before:
x = self.norm1(x)
self.x_norm = x
if cache is None:
x_q = x
else:
assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size)
x_q = x[:, -1:, :]
residual = residual[:, -1:, :]
mask = None if mask is None else mask[:, -1:, :]
if self.concat_after:
x_concat = torch.cat((x, self.self_attn(x_q, x, x, mask)), dim=-1)
x = residual + self.concat_linear(x_concat)
else:
x = residual + self.dropout(self.self_attn(x_q, x, x, mask))
if not self.normalize_before:
x = self.norm1(x)
residual = x
if self.normalize_before:
x = self.norm2(x)
x = residual + self.dropout(self.feed_forward(x))
if not self.normalize_before:
x = self.norm2(x)
if cache is not None:
x = torch.cat([cache, x], dim=1)
return x, mask
class CustomEncoder(Encoder):
def __init__(
self,
idim,
selfattention_layer_type="selfattn",
attention_dim=256,
attention_heads=4,
conv_wshare=4,
conv_kernel_length=11,
conv_usebias=False,
linear_units=2048,
num_blocks=6,
dropout_rate=0.1,
positional_dropout_rate=0.1,
attention_dropout_rate=0.0,
input_layer="conv2d",
pos_enc_class=PositionalEncoding,
normalize_before=True,
concat_after=False,
positionwise_layer_type="linear",
positionwise_conv_kernel_size=1,
padding_idx=-1,
):
super().__init__(idim,
selfattention_layer_type,
attention_dim,
attention_heads,
conv_wshare,
conv_kernel_length,
conv_usebias,
linear_units,
num_blocks,
dropout_rate,
positional_dropout_rate,
attention_dropout_rate,
input_layer,
pos_enc_class,
normalize_before,
concat_after,
positionwise_layer_type,
positionwise_conv_kernel_size,
padding_idx)
positionwise_layer, positionwise_layer_args = self.get_positionwise_layer(
positionwise_layer_type,
attention_dim,
linear_units,
dropout_rate,
positionwise_conv_kernel_size,
)
encoder_selfattn_layer = MultiHeadedAttention
encoder_selfattn_layer_args = [
(
attention_heads,
attention_dim,
attention_dropout_rate,
)
] * num_blocks
self.encoders = repeat(
num_blocks,
lambda lnum: CustomEncoderLayer(
attention_dim,
encoder_selfattn_layer(*encoder_selfattn_layer_args[lnum]),
positionwise_layer(*positionwise_layer_args),
dropout_rate,
normalize_before,
concat_after,
),
)
def forward(self, xs, masks, return_repr=False):
"""Encode input sequence.
Args:
xs (torch.Tensor): Input tensor (#batch, time, idim).
masks (torch.Tensor): Mask tensor (#batch, time).
Returns:
torch.Tensor: Output tensor (#batch, time, attention_dim).
torch.Tensor: Mask tensor (#batch, time).
"""
xs, masks = self.embed(xs, masks)
#xs, masks = self.encoders(xs, masks)
final_repr = []
for layer_idx, e in enumerate(self.encoders):
xs, masks = e(xs, masks)
if return_repr and layer_idx > 0:
assert e.x_norm is not None
final_repr.append(e.x_norm)
#print(e.x_norm.mean(), xs.mean())
e.x_norm = None
if self.normalize_before:
xs = self.after_norm(xs)
final_repr.append(xs)
return (xs, masks) if not return_repr else (xs, masks, final_repr)
class CustomDecoderLayer(DecoderLayer):
def forward(self, tgt, tgt_mask, memory, memory_mask, cache=None):
"""Compute decoded features.
Args:
tgt (torch.Tensor): Input tensor (#batch, maxlen_out, size).
tgt_mask (torch.Tensor): Mask for input tensor (#batch, maxlen_out).
memory (torch.Tensor): Encoded memory, float32 (#batch, maxlen_in, size).
memory_mask (torch.Tensor): Encoded memory mask (#batch, maxlen_in).
cache (List[torch.Tensor]): List of cached tensors.
Each tensor shape should be (#batch, maxlen_out - 1, size).
Returns:
torch.Tensor: Output tensor(#batch, maxlen_out, size).
torch.Tensor: Mask for output tensor (#batch, maxlen_out).
torch.Tensor: Encoded memory (#batch, maxlen_in, size).
torch.Tensor: Encoded memory mask (#batch, maxlen_in).
"""
residual = tgt
if self.normalize_before:
tgt = self.norm1(tgt)
self.x_norm = tgt
if cache is None:
tgt_q = tgt
tgt_q_mask = tgt_mask
else:
# compute only the last frame query keeping dim: max_time_out -> 1
assert cache.shape == (
tgt.shape[0],
tgt.shape[1] - 1,
self.size,
), f"{cache.shape} == {(tgt.shape[0], tgt.shape[1] - 1, self.size)}"
tgt_q = tgt[:, -1:, :]
residual = residual[:, -1:, :]
tgt_q_mask = None
if tgt_mask is not None:
tgt_q_mask = tgt_mask[:, -1:, :]
if self.concat_after:
tgt_concat = torch.cat(
(tgt_q, self.self_attn(tgt_q, tgt, tgt, tgt_q_mask)), dim=-1
)
x = residual + self.concat_linear1(tgt_concat)
else:
x = residual + self.dropout(self.self_attn(tgt_q, tgt, tgt, tgt_q_mask))
if not self.normalize_before:
x = self.norm1(x)
residual = x
if self.normalize_before:
x = self.norm2(x)
if self.concat_after:
x_concat = torch.cat(
(x, self.src_attn(x, memory, memory, memory_mask)), dim=-1
)
x = residual + self.concat_linear2(x_concat)
else:
x = residual + self.dropout(self.src_attn(x, memory, memory, memory_mask))
if not self.normalize_before:
x = self.norm2(x)
residual = x
if self.normalize_before:
x = self.norm3(x)
x = residual + self.dropout(self.feed_forward(x))
if not self.normalize_before:
x = self.norm3(x)
if cache is not None:
x = torch.cat([cache, x], dim=1)
return x, tgt_mask, memory, memory_mask
class CustomDecoder(Decoder):
def __init__(
self,
odim,
selfattention_layer_type="selfattn",
attention_dim=256,
attention_heads=4,
conv_wshare=4,
conv_kernel_length=11,
conv_usebias=False,
linear_units=2048,
num_blocks=6,
dropout_rate=0.1,
positional_dropout_rate=0.1,
self_attention_dropout_rate=0.0,
src_attention_dropout_rate=0.0,
input_layer="embed",
use_output_layer=True,
pos_enc_class=PositionalEncoding,
normalize_before=True,
concat_after=False,
):
super().__init__(odim,
selfattention_layer_type,
attention_dim,
attention_heads,
conv_wshare,
conv_kernel_length,
conv_usebias,
linear_units,
num_blocks,
dropout_rate,
positional_dropout_rate,
self_attention_dropout_rate,
src_attention_dropout_rate,
input_layer,
use_output_layer,
pos_enc_class,
normalize_before,
concat_after,
)
decoder_selfattn_layer = MultiHeadedAttention
decoder_selfattn_layer_args = [
(
attention_heads,
attention_dim,
self_attention_dropout_rate,
)
] * num_blocks
self.decoders = repeat(
num_blocks,
lambda lnum: CustomDecoderLayer(
attention_dim,
decoder_selfattn_layer(*decoder_selfattn_layer_args[lnum]),
MultiHeadedAttention(
attention_heads, attention_dim, src_attention_dropout_rate
),
PositionwiseFeedForward(attention_dim, linear_units, dropout_rate),
dropout_rate,
normalize_before,
concat_after,
),
)
def forward(self, tgt, tgt_mask, memory, memory_mask, return_repr=False):
"""Forward decoder.
Args:
tgt (torch.Tensor): Input token ids, int64 (#batch, maxlen_out) if
input_layer == "embed". In the other case, input tensor
(#batch, maxlen_out, odim).
tgt_mask (torch.Tensor): Input token mask (#batch, maxlen_out).
dtype=torch.uint8 in PyTorch 1.2- and dtype=torch.bool in PyTorch 1.2+
(include 1.2).
memory (torch.Tensor): Encoded memory, float32 (#batch, maxlen_in, feat).
memory_mask (torch.Tensor): Encoded memory mask (#batch, maxlen_in).
dtype=torch.uint8 in PyTorch 1.2- and dtype=torch.bool in PyTorch 1.2+
(include 1.2).
Returns:
torch.Tensor: Decoded token score before softmax (#batch, maxlen_out, odim)
if use_output_layer is True. In the other case,final block outputs
(#batch, maxlen_out, attention_dim).
torch.Tensor: Score mask before softmax (#batch, maxlen_out).
"""
x = self.embed(tgt)
# x, tgt_mask, memory, memory_mask = self.decoders(
# x, tgt_mask, memory, memory_mask
# )
final_repr = []
for layer_idx, decoder in enumerate(self.decoders):
x, tgt_mask, memory, memory_mask = decoder(
x, tgt_mask, memory, memory_mask
)
if return_repr and layer_idx > 0:
assert decoder.x_norm is not None
final_repr.append(decoder.x_norm)
decoder.x_norm = None
if self.normalize_before:
x = self.after_norm(x)
final_repr.append(x)
if self.output_layer is not None:
x = self.output_layer(x)
return (x, tgt_mask, None) if not return_repr else (x, tgt_mask, final_repr)
class CustomSpeechTransformer(SpeechTransformer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
idim, odim = args
args = kwargs["args"]
self.encoder = CustomEncoder(
idim=idim,
selfattention_layer_type=args.transformer_encoder_selfattn_layer_type,
attention_dim=args.adim,
attention_heads=args.aheads,
conv_wshare=args.wshare,
conv_kernel_length=args.ldconv_encoder_kernel_length,
conv_usebias=args.ldconv_usebias,
linear_units=args.eunits,
num_blocks=args.elayers,
input_layer=args.transformer_input_layer,
dropout_rate=args.dropout_rate,
positional_dropout_rate=args.dropout_rate,
attention_dropout_rate=args.transformer_attn_dropout_rate,
)
self.decoder = CustomDecoder(
odim=odim,
selfattention_layer_type=args.transformer_decoder_selfattn_layer_type,
attention_dim=args.adim,
attention_heads=args.aheads,
conv_wshare=args.wshare,
conv_kernel_length=args.ldconv_decoder_kernel_length,
conv_usebias=args.ldconv_usebias,
linear_units=args.dunits,
num_blocks=args.dlayers,
dropout_rate=args.dropout_rate,
positional_dropout_rate=args.dropout_rate,
self_attention_dropout_rate=args.transformer_attn_dropout_rate,
src_attention_dropout_rate=args.transformer_attn_dropout_rate,
)
class UDASpeechTransformer(CustomSpeechTransformer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
args = kwargs["args"]
assert hasattr(args, "transfer_type")
assert isinstance(args.self_training, bool)
self.loss_type = args.transfer_type
self.self_training = args.self_training
self.n_classes = len(args.char_list)
self.multi_enc_repr_num = args.multi_enc_repr_num
self.multi_dec_repr_num = args.multi_dec_repr_num
self.use_dec_repr = args.use_dec_repr
self.pseudo_ctc_confidence_thr = args.pseudo_ctc_confidence_thr # Threshold for filtering CTC outputs
self.cmatch_method = args.cmatch_method
self.ctc_aligner = CTCForcedAligner(char_list=None)
self.char_list = args.char_list
self.bpemodel = None
self.non_char_symbols = list(map(int, args.non_char_symbols.split("_")))
if self.loss_type:
if "cmatch" in self.loss_type:
assert args.cmatch_method is not None, "CMatch method is required."
assert self.non_char_symbols is not None, "Non-character symbol list must be specified"
elif self.loss_type == "adv":
self.domain_classifier = Discriminator(input_dim=self.adim, hidden_dim=self.adim)
def forward(self,
xs_pad,
ilens,
ys_pad,
tgt_xs_pad=None,
tgt_ilens=None,
tgt_ys_pad=None):
"""E2E forward.
:param torch.Tensor xs_pad: batch of padded source sequences (B, Tmax, idim)
:param torch.Tensor ilens: batch of lengths of source sequences (B)
:param torch.Tensor ys_pad: batch of padded target sequences (B, Lmax)
:return: ctc loss value
:rtype: torch.Tensor
:return: attention loss value
:rtype: torch.Tensor
:return: accuracy in attention decoder
:rtype: float
"""
dec_return_repr = True
enc_return_repr = True
# 1. forward encoder
xs_pad = xs_pad[:, : max(ilens)] # for data parallel
src_mask = make_non_pad_mask(ilens.tolist()).to(xs_pad.device).unsqueeze(-2)
if enc_return_repr:
hs_pad, hs_mask, src_enc_repr = self.encoder(xs_pad, src_mask, return_repr=enc_return_repr)
src_enc_repr = src_enc_repr[-self.multi_enc_repr_num:]
else:
hs_pad, hs_mask = self.encoder(xs_pad, src_mask)
self.hs_pad = hs_pad
# 2. forward decoder
if self.decoder is not None:
ys_in_pad, ys_out_pad = add_sos_eos(
ys_pad, self.sos, self.eos, self.ignore_id
)
ys_mask = target_mask(ys_in_pad, self.ignore_id)
pred_pad, pred_mask, src_dec_repr = self.decoder(ys_in_pad, ys_mask, hs_pad, hs_mask, return_repr=dec_return_repr)
if src_dec_repr:
src_dec_repr = src_dec_repr[-self.multi_dec_repr_num:]
self.pred_pad = pred_pad
# 3. compute attention loss
loss_att = self.criterion(pred_pad, ys_out_pad)
self.acc = th_accuracy(
pred_pad.view(-1, self.odim), ys_out_pad, ignore_label=self.ignore_id
)
else:
loss_att = None
self.acc = None
cer_ctc = None
if self.mtlalpha == 0.0:
loss_ctc = None
else:
batch_size = xs_pad.size(0)
hs_len = hs_mask.view(batch_size, -1).sum(1)
loss_ctc = self.ctc(hs_pad.view(batch_size, -1, self.adim), hs_len, ys_pad)
if not self.training and self.error_calculator is not None:
ys_hat = self.ctc.argmax(hs_pad.view(batch_size, -1, self.adim)).data
cer_ctc = self.error_calculator(ys_hat.cpu(), ys_pad.cpu(), is_ctc=True)
# for visualization
if not self.training:
self.ctc.softmax(hs_pad)
# 5. compute cer/wer
if self.training or self.error_calculator is None or self.decoder is None:
cer, wer = None, None
else:
ys_hat = pred_pad.argmax(dim=-1)
cer, wer = self.error_calculator(ys_hat.cpu(), ys_pad.cpu())
src_hlens = torch.tensor([int(sum(mask[0])) for mask in hs_mask])
if self.cmatch_method == "pseudo_ctc_pred":
src_hs_flatten = torch.cat([hs_pad[i, :src_hlens[i], :].view(-1, self.adim) for i in range(len(hs_pad))]) # hs_pad: B * T, F
src_ctc_softmax = torch.nn.functional.softmax(self.ctc.ctc_lo(src_hs_flatten), dim=1)
else:
src_ctc_softmax = None
# Domain adversarial loss
if tgt_xs_pad is not None and tgt_ilens is not None:
src_ys_pad = ys_pad
src_hs_pad, src_hs_mask = hs_pad, hs_mask
tgt_xs_pad = tgt_xs_pad[:, : max(tgt_ilens)] # for data parallel
tgt_src_mask = make_non_pad_mask(tgt_ilens.tolist()).to(tgt_xs_pad.device).unsqueeze(-2)
if enc_return_repr:
tgt_hs_pad, tgt_hs_mask, tgt_enc_repr = self.encoder(tgt_xs_pad, tgt_src_mask, return_repr=enc_return_repr)
tgt_enc_repr = tgt_enc_repr[-self.multi_enc_repr_num:]
else:
tgt_hs_pad, tgt_hs_mask = self.encoder(tgt_xs_pad, tgt_src_mask)
src_ys_out_pad = ys_out_pad
src_ys_out_flatten = src_ys_out_pad.contiguous().view(-1)
if tgt_ys_pad is not None:
tgt_ys_in_pad, tgt_ys_out_pad = add_sos_eos(
tgt_ys_pad, self.sos, self.eos, self.ignore_id
)
tgt_ys_mask = target_mask(tgt_ys_in_pad, self.ignore_id)
tgt_pred_pad, tgt_pred_mask, tgt_dec_repr = self.decoder(tgt_ys_in_pad, tgt_ys_mask, tgt_hs_pad, tgt_hs_mask, return_repr=dec_return_repr)
if tgt_dec_repr:
tgt_dec_repr = tgt_dec_repr[-self.multi_dec_repr_num:]
tgt_ys_out_flatten = tgt_ys_out_pad.contiguous().view(-1)
tgt_hlens = torch.tensor([int(sum(mask[0])) for mask in tgt_hs_mask])
if self.cmatch_method == "pseudo_ctc_pred":
tgt_hs_flatten = torch.cat([tgt_hs_pad[i, :tgt_hlens[i], :].view(-1, self.adim) for i in range(len(tgt_hs_pad))]) # hs_pad: B * T, F
tgt_ctc_softmax = torch.nn.functional.softmax(self.ctc.ctc_lo(tgt_hs_flatten), dim=1)
else:
tgt_ctc_softmax = None
if self.self_training:
src_loss_att = loss_att
src_loss_ctc = loss_ctc
tgt_batch_size = tgt_xs_pad.size(0)
tgt_hs_len = tgt_hs_mask.view(tgt_batch_size, -1).sum(1)
tgt_loss_ctc = self.ctc(tgt_hs_pad.view(tgt_batch_size, -1, self.adim), tgt_hs_len, tgt_ys_pad)
tgt_loss_att = self.criterion(tgt_pred_pad, tgt_ys_out_pad)
loss_att = (src_loss_att + tgt_loss_att) / 2
loss_ctc = (src_loss_ctc + tgt_loss_ctc) / 2
self.acc = (self.acc + th_accuracy(
tgt_pred_pad.view(-1, self.odim), tgt_ys_out_pad, ignore_label=self.ignore_id
)) / 2
uda_loss = torch.tensor(0.0).cuda()
if not self.loss_type:
uda_loss = torch.tensor(0.0).cuda()
elif self.loss_type == "adv":
uda_loss = self.adversarial_loss(src_hs_pad, tgt_hs_pad)
elif self.loss_type == "cmatch":
assert tgt_ys_pad is not None
assert len(src_enc_repr) == self.multi_enc_repr_num and len(src_enc_repr) in [1, 3, 6, 9, 12], len(src_enc_repr)
for layer_idx in range(len(src_enc_repr)):
src_hs_flatten, src_ys_flatten, tgt_hs_flatten, tgt_ys_flatten \
= self.get_enc_repr(src_enc_repr[layer_idx],
src_hlens,
tgt_enc_repr[layer_idx],
tgt_hlens,
src_ys_pad,
tgt_ys_pad,
method=self.cmatch_method,
src_ctc_softmax=src_ctc_softmax,
tgt_ctc_softmax=tgt_ctc_softmax,)
layer_uda_loss = self.cmatch_loss_func(self.n_classes,
src_hs_flatten,
src_ys_flatten,
tgt_hs_flatten,
tgt_ys_flatten)
uda_loss = uda_loss + layer_uda_loss if uda_loss else layer_uda_loss
if self.use_dec_repr:
assert len(src_dec_repr) == self.multi_dec_repr_num, len(src_dec_repr)
# No need to calculate decoder matching loss
for layer_idx in range(len(src_dec_repr)):
src_repr_flatten = src_dec_repr[layer_idx].contiguous().view(-1, self.adim)
tgt_repr_flatten = tgt_dec_repr[layer_idx].contiguous().view(-1, self.adim)
layer_uda_loss = self.cmatch_loss_func(self.n_classes,
src_repr_flatten,
src_ys_out_flatten,
tgt_repr_flatten,
tgt_ys_out_flatten)
uda_loss = uda_loss + layer_uda_loss
elif self.loss_type in ["coral", "mmd"]:
# (B, T, F) --> (B, F)
uda_loss = adapt_loss(torch.mean(src_hs_pad, dim=1),
torch.mean(tgt_hs_pad, dim=1),
adapt_loss=self.loss_type)
else:
raise NotImplementedError(f"loss type {self.loss_type} is not implemented")
alpha = self.mtlalpha
if alpha == 0:
self.loss = loss_att
loss_att_data = float(loss_att)
loss_ctc_data = None
elif alpha == 1:
self.loss = loss_ctc
loss_att_data = None
loss_ctc_data = float(loss_ctc)
else:
self.loss = alpha * loss_ctc + (1 - alpha) * loss_att
loss_att_data = float(loss_att)
loss_ctc_data = float(loss_ctc)
loss_data = float(self.loss)
if loss_data < CTC_LOSS_THRESHOLD and not math.isnan(loss_data):
self.reporter.report(
loss_ctc_data, loss_att_data, self.acc, cer_ctc, cer, wer, loss_data
)
else:
logging.warning("loss (=%f) is not correct", loss_data)
return (self.loss, uda_loss) if (self.training and uda_loss is not None) else self.loss
def adversarial_loss(self, src_hs_pad, tgt_hs_pad, alpha=1.0):
loss_fn = torch.nn.BCELoss()
src_hs_pad = ReverseLayerF.apply(src_hs_pad, alpha)
tgt_hs_pad = ReverseLayerF.apply(tgt_hs_pad, alpha)
src_domain = self.domain_classifier(src_hs_pad).view(-1, 1) # B, T, 1
tgt_domain = self.domain_classifier(tgt_hs_pad).view(-1, 1) # B, T, 1
device = src_hs_pad.device
src_label = torch.ones(len(src_domain)).long().to(device)
tgt_label = torch.zeros(len(tgt_domain)).long().to(device)
domain_pred = torch.cat([src_domain, tgt_domain], dim=0)
domain_label = torch.cat([src_label, tgt_label], dim=0)
uda_loss = loss_fn(domain_pred, domain_label[:, None].float()) # B, 1
return uda_loss
def get_enc_repr(self,
src_hs_pad,
src_hlens,
tgt_hs_pad,
tgt_hlens,
src_ys_pad,
tgt_ys_pad,
method,
src_ctc_softmax=None,
tgt_ctc_softmax=None):
src_ys = [y[y != self.ignore_id] for y in src_ys_pad]
tgt_ys = [y[y != self.ignore_id] for y in tgt_ys_pad]
if method == "frame_average":
def frame_average(hidden_states, num):
# hs_i, B T F
hidden_states = hidden_states.permute(0, 2, 1)
downsampled_states = torch.nn.functional.adaptive_avg_pool1d(hidden_states, num)
downsampled_states = downsampled_states.permute(0, 2, 1)
assert downsampled_states.shape[1] == num, f"{downsampled_states.shape[1]}, {num}"
return downsampled_states
src_hs_downsampled = frame_average(src_hs_pad, num=src_ys_pad.size(1))
tgt_hs_downsampled = frame_average(tgt_hs_pad, num=tgt_ys_pad.size(1))
src_hs_flatten = src_hs_downsampled.contiguous().view(-1, self.adim)
tgt_hs_flatten = tgt_hs_downsampled.contiguous().view(-1, self.adim)
src_ys_flatten = src_ys_pad.contiguous().view(-1)
tgt_ys_flatten = tgt_ys_pad.contiguous().view(-1)
elif method == "ctc_align":
src_ys = [y[y != -1] for y in src_ys_pad]
src_logits = self.ctc.ctc_lo(src_hs_pad)
src_align_pad = self.ctc_aligner(src_logits, src_hlens, src_ys)
src_ys_flatten = torch.cat([src_align_pad[i, :src_hlens[i]].view(-1) for i in range(len(src_align_pad))])
src_hs_flatten = torch.cat([src_hs_pad[i, :src_hlens[i], :].view(-1, self.adim) for i in range(len(src_hs_pad))]) # hs_pad: B, T, F
tgt_ys = [y[y != -1] for y in tgt_ys_pad]
tgt_logits = self.ctc.ctc_lo(tgt_hs_pad)
tgt_align_pad = self.ctc_aligner(tgt_logits, tgt_hlens, tgt_ys)
tgt_ys_flatten = torch.cat([tgt_align_pad[i, :tgt_hlens[i]].view(-1) for i in range(len(tgt_align_pad))])
tgt_hs_flatten = torch.cat([tgt_hs_pad[i, :tgt_hlens[i], :].view(-1, self.adim) for i in range(len(tgt_hs_pad))]) # hs_pad: B, T, F
elif method == "pseudo_ctc_pred":
assert src_ctc_softmax is not None
src_hs_flatten = torch.cat([src_hs_pad[i, :src_hlens[i], :].view(-1, self.adim) for i in range(len(src_hs_pad))]) # hs_pad: B * T, F
src_hs_flatten_size = src_hs_flatten.shape[0]
src_confidence, src_ctc_ys = torch.max(src_ctc_softmax, dim=1)
src_confidence_mask = (src_confidence > self.pseudo_ctc_confidence_thr)
src_ys_flatten = src_ctc_ys[src_confidence_mask]
src_hs_flatten = src_hs_flatten[src_confidence_mask]
assert tgt_ctc_softmax is not None
tgt_hs_flatten = torch.cat([tgt_hs_pad[i, :tgt_hlens[i], :].view(-1, self.adim) for i in range(len(tgt_hs_pad))]) # hs_pad: B * T, F
tgt_hs_flatten_size = tgt_hs_flatten.shape[0]
tgt_confidence, tgt_ctc_ys = torch.max(tgt_ctc_softmax, dim=1)
tgt_confidence_mask = (tgt_confidence > self.pseudo_ctc_confidence_thr)
tgt_ys_flatten = tgt_ctc_ys[tgt_confidence_mask]
tgt_hs_flatten = tgt_hs_flatten[tgt_confidence_mask]
# logging.warning(f"Source pseudo CTC ratio: {src_hs_flatten.shape[0] / src_hs_flatten_size:.2f}; " \
# f"Target pseudo CTC ratio: {tgt_hs_flatten.shape[0] / tgt_hs_flatten_size:.2f}")
return src_hs_flatten, src_ys_flatten, tgt_hs_flatten, tgt_ys_flatten
def cmatch_loss_func(self, n_classes,
src_features, src_labels,
tgt_features, tgt_labels):
assert src_features.shape[0] == src_labels.shape[0]
assert tgt_features.shape[0] == tgt_labels.shape[0]
classes = torch.arange(n_classes)
def src_token_idxs(c):
return src_labels.eq(c).nonzero().squeeze(1)
src_token_idxs = list(map(src_token_idxs, classes))
def tgt_token_idxs(c):
return tgt_labels.eq(c).nonzero().squeeze(1)
tgt_token_idxs = list(map(tgt_token_idxs, classes))
assert len(src_token_idxs) == n_classes
assert len(tgt_token_idxs) == n_classes
loss = torch.tensor(0.0).cuda()
count = 0
for c in classes:
if c in self.non_char_symbols or src_token_idxs[c].shape[0] < 5 or tgt_token_idxs[c].shape[0] < 5:
continue
loss = loss + adapt_loss(src_features[src_token_idxs[c]],
tgt_features[tgt_token_idxs[c]],
adapt_loss='mmd_linear')
count = count + 1
loss = loss / count if count > 0 else loss
return loss | 31,542 | 43.741844 | 154 | py |
NeuralSpeech | NeuralSpeech-master/CMatchASR/data_load.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from espnet.utils.training.batchfy import make_batchset
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pad_sequence
import torch
import os
import json
import kaldiio
import random
import logging
import sentencepiece as spm
data_config = {
"librispeech": {
"train": "dump/train_960/deltafalse/data_unigram5000.json",
"val": "dump/dev_clean/deltafalse/data_unigram5000.json",
"test": "dump/test_clean/deltafalse/data_unigram5000.json",
"token": "data/lang_char/train_960_unigram5000_units.txt",
"prefix": "/espnet/egs/librispeech/asr1/",
},
"wsj": {
"train": "dump/train_si284/deltafalse/data.json",
"val": "dump/test_dev93/deltafalse/data.json",
"test": "dump/test_eval92/deltafalse/data.json",
"token": "data/lang_1char/train_si284_units.txt",
"prefix": "/opt/espnet/egs/wsj/asr1/",
},
"an4": {
"train": "dump/train_nodev/deltafalse/data.json",
"val": "dump/train_dev/deltafalse/data.json",
"test": "dump/test/deltafalse/data.json",
"token": "data/lang_1char/train_nodev_units.txt",
"prefix": "/home/jindwang/mine/espnet/egs/an4/asr1/",
},
"libriadapt_en_us_clean_matrix": {
"train": "dump/en_us_clean_matrix/train/deltafalse/data_unigram31.json",
"val": None,
"test": "dump/en_us_clean_matrix/test/deltafalse/data_unigram31.json",
"token": "data/lang_char/en_us_clean_matrix/train_unigram31_units.txt",
"prefix": "/D_data/libriadapt_processed/asr1/",
"bpemodel": "data/lang_char/en_us_clean_matrix/train_unigram31.model",
},
"libriadapt_en_us_clean_usb": {
"train": "dump/en_us_clean_usb/train/deltafalse/data_unigram31.json",
"val": None,
"test": "dump/en_us_clean_usb/test/deltafalse/data_unigram31.json",
"token": "data/lang_char/en_us_clean_usb/train_unigram31_units.txt",
"prefix": "/D_data/libriadapt_processed/asr1/",
"bpemodel": "data/lang_char/en_us_clean_usb/train_unigram31.model",
},
"libriadapt_en_us_clean_pseye": {
"train": "dump/en_us_clean_pseye/train/deltafalse/data_unigram31.json",
"val": None,
"test": "dump/en_us_clean_pseye/test/deltafalse/data_unigram31.json",
"token": "data/lang_char/en_us_clean_pseye/train_unigram31_units.txt",
"prefix": "/D_data/libriadapt_processed/asr1/",
"bpemodel": "data/lang_char/en_us_clean_pseye/train_unigram31.model",
},
"libriadapt_en_us_clean_respeaker": {
"train": "dump/en_us_clean_respeaker/train/deltafalse/data_unigram31.json",
"val": None,
"test": "dump/en_us_clean_respeaker/test/deltafalse/data_unigram31.json",
"token": "data/lang_char/en_us_clean_respeaker/train_unigram31_units.txt",
"prefix": "/D_data/libriadapt_processed/asr1/",
"bpemodel": "data/lang_char/en_us_clean_respeaker/train_unigram31.model",
},
"libriadapt_en_us_rain_respeaker": {
"train": "dump/en_us_rain_respeaker/train/deltafalse/data_unigram31.json",
"val": None,
"test": "dump/en_us_rain_respeaker/test/deltafalse/data_unigram31.json",
"token": "data/lang_char/en_us_rain_respeaker/train_unigram31_units.txt",
"prefix": "/D_data/libriadapt_processed/asr1/",
"bpemodel": "data/lang_char/en_us_rain_respeaker/train_unigram31.model",
},
"libriadapt_en_us_wind_respeaker": {
"train": "dump/en_us_wind_respeaker/train/deltafalse/data_unigram31.json",
"val": None,
"test": "dump/en_us_wind_respeaker/test/deltafalse/data_unigram31.json",
"token": "data/lang_char/en_us_wind_respeaker/train_unigram31_units.txt",
"prefix": "/D_data/libriadapt_processed/asr1/",
"bpemodel": "data/lang_char/en_us_wind_respeaker/train_unigram31.model",
},
"libriadapt_en_us_laughter_respeaker": {
"train": "dump/en_us_laughter_respeaker/train/deltafalse/data_unigram31.json",
"val": None,
"test": "dump/en_us_laughter_respeaker/test/deltafalse/data_unigram31.json",
"token": "data/lang_char/en_us_laughter_respeaker/train_unigram31_units.txt",
"prefix": "/D_data/libriadapt_processed/asr1/",
"bpemodel": "data/lang_char/en_us_laughter_respeaker/train_unigram31.model",
},
"libriadapt_en_us_clean_shure": {
"train": "dump/en_us_clean_shure/train/deltafalse/data_unigram31.json",
"val": None,
"test": "dump/en_us_clean_shure/test/deltafalse/data_unigram31.json",
"token": "data/lang_char/en_us_clean_shure/train_unigram31_units.txt",
"prefix": "/D_data/libriadapt_processed/asr1/",
"bpemodel": "data/lang_char/en_us_clean_shure/train_unigram31.model",
},
"libriadapt_en_gb_clean_shure": {
"train": "dump/en_gb_clean_shure/train/deltafalse/data_unigram31.json",
"val": None,
"test": "dump/en_gb_clean_shure/test/deltafalse/data_unigram31.json",
"token": "data/lang_char/en_gb_clean_shure/train_unigram31_units.txt",
"prefix": "/D_data/libriadapt_processed/asr1/",
"bpemodel": "data/lang_char/en_gb_clean_shure/train_unigram31.model",
},
"libriadapt_en_in_clean_shure": {
"train": "dump/en_in_clean_shure/train/deltafalse/data_unigram31.json",
"val": None,
"test": "dump/en_in_clean_shure/test/deltafalse/data_unigram31.json",
"token": "data/lang_char/en_in_clean_shure/train_unigram31_units.txt",
"prefix": "/D_data/libriadapt_processed/asr1/",
"bpemodel": "data/lang_char/en_in_clean_shure/train_unigram31.model",
},
}
def read_json_file(fname):
with open(fname, "rb") as f:
contents = json.load(f)["utts"]
return contents
def load_json(train_json_file, dev_json_file, test_json_file):
train_json = read_json_file(train_json_file)
if os.path.isfile(dev_json_file) and not "tmp_dev_set" in dev_json_file:
dev_json = read_json_file(dev_json_file)
else:
n_samples = len(train_json)
train_size = int(0.9 * n_samples)
logging.warning(
f"No dev set provided, will split the last {n_samples - train_size} (10%) samples from training data"
)
train_json_item = list(train_json.items())
# random.shuffle(train_json_item)
train_json = dict(train_json_item[:train_size])
dev_json = dict(train_json_item[train_size:])
# Save temp dev set
with open(dev_json_file, "w") as f:
json.dump({"utts": dev_json}, f)
logging.warning(f"Temporary dev set saved: {dev_json_file}")
test_json = read_json_file(test_json_file)
logging.warning(f"#Train Json {train_json_file}: {len(train_json)}")
logging.warning(f"#Dev Json {dev_json_file}: {len(dev_json)}")
logging.warning(f"#Test Json {test_json_file}: {len(test_json)}")
return train_json, dev_json, test_json
def load_data(root_path, dataset, args,
pseudo_label_json=None, pseudo_label_filtering=True, use_pseudo_label=True):
def collate(minibatch):
fbanks = []
tokens = []
for _, info in minibatch[0]:
fbanks.append(
torch.tensor(
kaldiio.load_mat(
info["input"][0]["feat"].replace(
data_config[dataset]["prefix"], root_path
)
)
)
)
if use_pseudo_label and "pseudo_tokenid" in info["output"][0].keys():
tokens.append(
torch.tensor([int(s) for s in info["output"][0]["pseudo_tokenid"].split()])
)
else:
tokens.append(
torch.tensor([int(s) for s in info["output"][0]["tokenid"].split()])
)
ilens = torch.tensor([x.shape[0] for x in fbanks])
return (
pad_sequence(fbanks, batch_first=True, padding_value=0),
ilens,
pad_sequence(tokens, batch_first=True, padding_value=-1),
)
train_json = os.path.join(root_path, data_config[dataset]["train"])
dev_json = (
os.path.join(root_path, data_config[dataset]["val"])
if data_config[dataset]["val"]
else f"{root_path}/tmp_dev_set_{dataset}.json"
)
test_json = os.path.join(root_path, data_config[dataset]["test"])
train_json, dev_json, test_json = load_json(train_json, dev_json, test_json)
_, info = next(iter(train_json.items()))
if use_pseudo_label and pseudo_label_json:
psuedo_label_json = read_json_file(pseudo_label_json)
assert psuedo_label_json.keys() == train_json.keys() or list(psuedo_label_json.keys())[:25685] == list(train_json.keys()), \
"Keys of pseudo label and training data not matched"
for key in train_json.keys():
train_json[key]['output'][0]['pseudo_tokenid'] = ' '.join(psuedo_label_json[key]['output'][0]['rec_tokenid'].split()[:-1])
train_json[key]['output'][0]["score"] = psuedo_label_json[key]['output'][0]['score']
if use_pseudo_label and pseudo_label_json and pseudo_label_filtering:
filtered_sample = 0
filtered_ratio = 0.3 if pseudo_label_filtering else 0.0
train_json = sorted(train_json.items(), key=lambda x:x[1]['output'][0]['score'], reverse=True)
sample_num = len(train_json)
train_json = train_json[:int(sample_num * (1 - filtered_ratio))]
logging.warning(f"Filtering: {len(train_json)}/{sample_num} pseudo-labelled samples are kept")
train_json = dict(train_json)
filtered_sample = 0
idim = info["input"][0]["shape"][1]
odim = info["output"][0]["shape"][1]
use_sortagrad = False # args.sortagrad == -1 or args.sortagrad > 0
trainset = make_batchset(
train_json,
args.batch_size,
args.maxlen_in,
args.maxlen_out,
args.minibatches,
min_batch_size=args.ngpu if (args.ngpu > 1 and not args.dist_train) else 1,
shortest_first=use_sortagrad,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout,
iaxis=0,
oaxis=0,
)
devset = make_batchset(
dev_json,
args.batch_size if args.ngpu <= 1 else int(args.batch_size / args.ngpu),
args.maxlen_in,
args.maxlen_out,
args.minibatches,
min_batch_size=1,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout,
iaxis=0,
oaxis=0,
)
testset = make_batchset(
test_json,
args.batch_size if args.ngpu <= 1 else int(args.batch_size / args.ngpu),
args.maxlen_in,
args.maxlen_out,
args.minibatches,
min_batch_size=1,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout,
iaxis=0,
oaxis=0,
)
if args.dist_train and args.ngpu > 1:
train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)
else:
train_sampler = None
train_loader = DataLoader(
trainset,
batch_size=1,
collate_fn=collate,
num_workers=args.n_iter_processes,
shuffle=(train_sampler is None and not args.pseudo_labeling),
pin_memory=True,
sampler=train_sampler,
)
dev_loader = DataLoader(
devset,
batch_size=1,
collate_fn=collate,
shuffle=False,
num_workers=args.n_iter_processes,
pin_memory=True,
)
test_loader = DataLoader(
testset,
batch_size=1,
collate_fn=collate,
shuffle=False,
num_workers=args.n_iter_processes,
pin_memory=True,
)
return (train_loader, dev_loader, test_loader), (idim, odim)
def load_token_list(token_file):
with open(token_file, "r") as f:
token_list = [entry.split()[0] for entry in f]
token_list.insert(0, "<blank>")
token_list.append("<eos>")
return token_list
def load_bpemodel(root_path, dataset):
bpemodel_path = os.path.join(root_path, data_config[dataset]["bpemodel"])
bpemodel = spm.SentencePieceProcessor()
bpemodel.Load(bpemodel_path)
return bpemodel
| 12,738 | 41.042904 | 134 | py |
NeuralSpeech | NeuralSpeech-master/CMatchASR/train.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import logging
import os
import collections
from espnet.bin.asr_train import get_parser
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.asr.pytorch_backend.asr_init import freeze_modules
from torch.nn.parallel import data_parallel
from torch.nn.utils.clip_grad import clip_grad_norm_
import torch
import numpy as np
import data_load
import random
import json
import sys
from utils import setup_logging, str2bool, dict_average
from utils import load_pretrained_model, torch_save, torch_load
from utils import recognize_and_evaluate
from e2e_asr_udatransformer import UDASpeechTransformer
def add_custom_arguments(parser):
parser.add_argument('--data_file', type=str, default=None)
parser.add_argument("--root_path", type=str, required=True, help="Path to the ESPnet features, e.g.: <espnet_path>/egs/libriadapt_processed/asr1/")
parser.add_argument('--dataset', type=str, required=True,
help="Dataset name to be referred in data_load, e.g.: libriadapt_en_us_clean_shure")
parser.add_argument("--exp", type=str, default="exp")
parser.add_argument("--decoding_mode", type=str2bool, default=False, help="if true, then only perform decoding test")
parser.add_argument("--load_pretrained_model", type=str, default="", nargs="?",
help="<model_path>:<load_modules>:<exclude_modules>")
parser.add_argument("--compute_cer", type=str2bool, default=True)
parser.add_argument("--compute_cer_interval", type=int, default=1)
parser.add_argument("--start_eval_errs", type=int, default=70)
parser.add_argument("--decoding_config", type=str, default=None)
parser.add_argument(
"--bpemodel", type=bool, default=True
) # Set to true when testing CER/WERs
parser.add_argument("--dist_train", type=str2bool, default=False)
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument("--result_label", type=str, default=None)
parser.add_argument("--recog_json", type=str, default=None)
parser.add_argument("--adam_lr", type=float, default=1e-3)
# Transfer learning related
parser.add_argument("--transfer_type", type=str, default=None, help="adaptation method")
parser.add_argument("--cmatch_method", type=str, default="frame_average",
choices=["ctc_align", "frame_average", "pseudo_ctc_pred"],
help="label assignment methods for CMatch")
parser.add_argument("--tgt_dataset", type=str, default="", nargs="?")
parser.add_argument("--transfer_loss_weight", type=float, default=10.0)
parser.add_argument("--tranfer_loss_weight_warmup_steps", type=int, default=0)
parser.add_argument("--pseudo_labeling", type=str2bool, default=False)
parser.add_argument("--pseudo_label_json", type=str, default="", nargs="?")
parser.add_argument("--non_char_symbols", type=str, default=None, nargs="?",
help="Indices of non-character symbols that will be filtered when computing CMatch loss, split by '_', e.g., 0_1_2_29_30")
parser.add_argument("--self_training", type=str2bool, default=False)
parser.add_argument("--multi_enc_repr_num", type=int, default=1)
parser.add_argument("--multi_dec_repr_num", type=int, default=6)
parser.add_argument("--use_dec_repr", type=str2bool, default=False)
parser.add_argument("--pseudo_ctc_confidence_thr", type=float, default=0.9, help="Threshold for filtering CTC outputs")
def test(dataloader, model, model_path=None):
if model_path:
torch_load(model_path, model)
model.eval()
stats = collections.defaultdict(list)
for batch_idx, data in enumerate(dataloader):
logging.warning(f"Testing batch: {batch_idx+1}/{len(dataloader)}")
fbank, seq_lens, tokens = data
fbank, seq_lens, tokens = fbank.cuda(), seq_lens.cuda(), tokens.cuda()
with torch.no_grad():
loss = model(fbank, seq_lens, tokens)
stats["loss_lst"].append(loss.item())
if not hasattr(model, "module"):
if model.acc is not None:
stats["acc_lst"].append(model.acc)
model.acc = None
else:
if model.module.acc is not None:
stats["acc_lst"].append(model.module.acc)
model.module.acc = None
return dict_average(stats)
def train(dataloaders, model, optimizer, save_path):
train_loader, val_loader, test_loader = dataloaders
best_loss = float("inf")
early_stop = 0
log_json = []
for epoch in range(args.start_epoch, args.epochs + 1):
early_stop += 1
epoch_stats = collections.OrderedDict(epoch=epoch)
train_stats = train_epoch(train_loader, model, optimizer, epoch)
valid_stats = test(val_loader, model)
test_stats = test(test_loader, model)
logging.warning(
f"Epoch: {epoch}, Iteration: {epoch * len(train_loader)}, "
+ f"train loss: {train_stats['loss']:.4f}, dev loss: {valid_stats['loss']:.3f}, test loss: {test_stats['loss']:.3f}"
)
torch_save(model, f"{args.outdir}/snapshot.ep.{epoch}", optimizer=optimizer)
for key in sorted(list(set(list(train_stats.keys()) + list(test_stats.keys())))):
if not key.endswith("_lst"):
if key in train_stats:
epoch_stats[f"main/{key}"] = train_stats[key]
if key in valid_stats:
epoch_stats[f"validation/main/{key}"] = valid_stats[key]
if key in test_stats:
epoch_stats[f"test/main/{key}"] = test_stats[key]
log_json.append(epoch_stats)
with open(f"{args.outdir}/log", "w") as f:
json.dump(log_json, f,
indent=4,
ensure_ascii=False,
separators=(",", ": "),
)
logging.warning(f"Log saved at {args.outdir}/log")
if args.patience > 0 and early_stop >= args.patience:
test_stats = test(test_loader, model, save_path)
logging.warning(f"=====Early stop! Final best test loss: {test_stats['loss']}")
break
def train_epoch(dataloader, model, optimizer, epoch=None):
model.train()
stats = collections.defaultdict(list)
for batch_idx, data in enumerate(dataloader):
fbank, seq_lens, tokens = data
fbank, seq_lens, tokens = fbank.cuda(), seq_lens.cuda(), tokens.cuda()
optimizer.zero_grad()
if args.ngpu <= 1 or args.dist_train:
loss = model(fbank, seq_lens, tokens).mean() # / self.accum_grad
else:
# apex does not support torch.nn.DataParallel
loss = (
data_parallel(model, (fbank, seq_lens, tokens), range(args.ngpu)).mean() # / self.accum_grad
)
if not hasattr(model, "module"):
if hasattr(model, "acc") and model.acc is not None:
stats["acc_lst"].append(model.acc)
model.acc = None
else:
if hasattr(model, "acc") and model.module.acc is not None:
stats["acc_lst"].append(model.module.acc)
model.module.acc = None
loss.backward()
clip_grad_norm_(model.parameters(), args.grad_clip)
optimizer.step()
stats["loss_lst"].append(loss.item())
logging.warning(f"Training batch: {batch_idx+1}/{len(dataloader)}")
return dict_average(stats)
def train_uda_epoch(train_loaders, model, optimizer, epoch):
src_loader, tgt_loader = train_loaders
iter_source, iter_target = iter(src_loader), iter(tgt_loader)
model.train()
stats = collections.defaultdict(list)
n_batch = min(len(src_loader), len(tgt_loader))
for batch_idx in range(n_batch):
src_data = iter_source.next()
for i in range(len(src_data)):
src_data[i] = src_data[i].cuda()
tgt_data = iter_target.next()
for i in range(len(tgt_data)):
tgt_data[i] = tgt_data[i].cuda()
optimizer.zero_grad()
if args.ngpu <= 1 or args.dist_train:
ctc_att_loss, uda_loss = model(*src_data, *tgt_data)
else:
# apex does not support torch.nn.DataParallel
ctc_att_loss, uda_loss = (
data_parallel(model, (*src_data, *tgt_data), range(args.ngpu))
)
ctc_att_loss = ctc_att_loss.mean()
loss = ctc_att_loss
if args.transfer_loss_weight > 0:
if args.tranfer_loss_weight_warmup_steps > 0:
current_iter = float(batch_idx + (epoch - 1) * n_batch)
frac_done = 1.0 * float(current_iter) / args.tranfer_loss_weight_warmup_steps
current_weight = args.transfer_loss_weight * min(1.0, frac_done)
stats["transfer_loss_weight"] = current_weight
else:
current_weight = args.transfer_loss_weight
transfer_loss = uda_loss.mean()
loss = ctc_att_loss + current_weight * transfer_loss
if not hasattr(model, "module"):
if hasattr(model, "acc") and model.acc is not None:
stats["acc_lst"].append(model.acc)
model.acc = None
else:
if hasattr(model, "acc") and model.module.acc is not None:
stats["acc_lst"].append(model.module.acc)
model.module.acc = None
loss.backward()
clip_grad_norm_(model.parameters(), args.grad_clip)
optimizer.step()
stats["ctc_att_loss_lst"].append(ctc_att_loss.item())
if args.transfer_loss_weight > 0:
stats["transfer_loss_lst"].append(transfer_loss.item())
stats["loss_lst"].append(loss.item())
logging.warning(f"Training batch: {batch_idx+1}/{n_batch}")
return dict_average(stats)
if __name__ == "__main__":
# 执行该命令运行4 GPU训练:CUDA_VISIBLE_DEVICES=0,1,2,3 python -m torch.distributed.launch --nproc_per_node=4 train.py --dist_train true --root_path /D_data/libriadapt_processed/asr1/ --dataset libriadapt_en_us_clean_shure --config config/adv_example.yaml --tgt_dataset libriadapt_en_us_clean_matrix --load_pretrained_model ""
setup_logging(verbose=0) # Should come first before other package import logging
parser = get_parser()
add_custom_arguments(parser)
arg_list = sys.argv[1:] + [
"--dict", '',
]
if "--config" not in arg_list:
arg_list += ["--config", "config/train.yaml"]
if "--outdir" not in arg_list:
arg_list += ["--outdir", '']
args, _ = parser.parse_known_args(arg_list)
# Use all GPUs
ngpu = torch.cuda.device_count() if args.ngpu is None else args.ngpu
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(
[str(item) for item in range(ngpu)])
logging.warning(f"ngpu: {ngpu}")
# set random seed
logging.info("random seed = %d" % args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
set_deterministic_pytorch(args)
torch.cuda.manual_seed(args.seed)
if ngpu > 1:
torch.cuda.manual_seed_all(args.seed) # multi-gpu setting
if args.model_module is None:
model_module = "espnet.nets." + args.backend + "_backend.e2e_asr:E2E"
else:
model_module = args.model_module
model_class = dynamic_import(model_module)
model_class.add_arguments(parser)
args = parser.parse_args(arg_list)
setattr(args, "conf_name", ".".join(os.path.basename(args.config).split(".")[:-1]))
if not args.outdir:
args.outdir = f"./outputs/results_{args.dataset}/{args.conf_name}"
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
setattr(args, "ngpu", ngpu)
if args.data_file is not None:
args.root_path = args.data_file
if args.ngpu > 1:
if args.opt == "noam" and hasattr(args, "transformer_lr"):
logging.warning(f"Multi-GPU training: increase transformer lr {args.transformer_lr} --> {args.transformer_lr * np.sqrt(args.ngpu)}")
args.transformer_lr = args.transformer_lr * np.sqrt(args.ngpu)
elif args.opt == "adam" and hasattr(args, "adam_lr"):
logging.warning(f"Multi-GPU training: increase adam lr {args.adam_lr} --> {args.adam_lr * np.sqrt(args.ngpu)}")
args.adam_lr = args.adam_lr * np.sqrt(args.ngpu)
if args.dist_train:
torch.distributed.init_process_group(backend="nccl")
local_rank = torch.distributed.get_rank()
args.local_rank = local_rank
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
else:
logging.warning(
"Training batch size is automatically increased (%d -> %d)"
% (args.batch_size, args.batch_size * args.ngpu)
)
args.batch_size *= args.ngpu
if args.accum_grad > 1:
logging.warning(
"gradient accumulation is not implemented. batch size is increased (%d -> %d)"
% (args.batch_size, args.batch_size * args.accum_grad)
)
args.batch_size *= args.accum_grad
args.accum_grad = 1
dataloaders, in_out_shape = data_load.load_data(args.root_path,
args.dataset,
args,)
if args.transfer_type or args.self_training:
assert args.tgt_dataset and args.tgt_dataset != args.dataset, \
f"Target data set {args.tgt_dataset} must be specified and different from the training dataset"
model_module = "e2e_asr_udatransformer:UDASpeechTransformer"
model_class = UDASpeechTransformer
if args.self_training:
logging.warning("Self-training mode")
assert args.pseudo_label_json, "Pseudo label json must be speicified for self-training"
train_epoch = train_uda_epoch
if args.pseudo_label_json:
logging.warning(f"Load pseudo label from {args.pseudo_label_json}")
(tgt_train_loader, _, test_loader), _ = data_load.load_data(args.root_path,
args.tgt_dataset,
args,
pseudo_label_json=args.pseudo_label_json)
src_train_loader, val_loader, src_test_loader = dataloaders
dataloaders = ((src_train_loader, tgt_train_loader), val_loader, test_loader)
tgt_test_loader = test_loader
token_list = data_load.load_token_list(
os.path.join(args.root_path, data_load.data_config[args.dataset]["token"])
)
setattr(args, "model_module", model_module)
setattr(args, "char_list", token_list)
model = model_class(in_out_shape[0], in_out_shape[1], args=args)
model_conf = args.outdir + "/model.json"
with open(model_conf, "wb") as f:
logging.info("writing a model config file to " + model_conf)
f.write(
json.dumps(
(in_out_shape[0], in_out_shape[1], vars(args)),
indent=4,
ensure_ascii=False,
sort_keys=True,
).encode("utf_8")
)
model.cuda()
if args.ngpu > 1 and args.dist_train:
model = torch.nn.parallel.DistributedDataParallel(model,
device_ids=[local_rank],
output_device=local_rank
)
if args.freeze_mods:
model, model_params = freeze_modules(model, args.freeze_mods)
else:
model_params = model.parameters()
# Setup an optimizer
if args.opt == "adadelta":
optimizer = torch.optim.Adadelta(
model_params, rho=0.95, eps=args.eps, weight_decay=args.weight_decay
)
elif args.opt == "adam":
logging.warning(f"Using Adam optimizer with lr={args.adam_lr}")
optimizer = torch.optim.Adam(model_params, lr=args.adam_lr, weight_decay=args.weight_decay)
elif args.opt == "noam":
from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt
optimizer = get_std_opt(
model_params, args.adim, args.transformer_warmup_steps, args.transformer_lr
)
# Resume from a snapshot
if args.resume:
logging.warning("resumed from %s" % args.resume)
torch_load(args.resume, model, optimizer)
setattr(args, "start_epoch", int(args.resume.split('.')[-1]) + 1)
else:
setattr(args, "start_epoch", 1)
if args.load_pretrained_model:
model_path, modules_to_load, exclude_modules = args.load_pretrained_model.split(":")
logging.warning("load pretrained model from %s" % args.load_pretrained_model)
load_pretrained_model(model=model, model_path=model_path,
modules_to_load=modules_to_load, exclude_modules=exclude_modules)
logging.warning(
"Total parameter of the model = "
+ str(sum(p.numel() for p in model.parameters()))
)
logging.warning(
"Trainable parameter of the model = "
+ str(sum(p.numel() for p in filter(lambda x: x.requires_grad, model.parameters())))
)
# For CER/WER computing
if args.bpemodel and "bpemodel" in data_load.data_config[args.dataset]:
logging.warning(f"load bpe model for {args.dataset}")
args.bpemodel = data_load.load_bpemodel(args.root_path, args.dataset)
save_path = f"{args.outdir}/model.loss.best"
if not args.decoding_mode and not args.pseudo_labeling:
train(dataloaders, model, optimizer, save_path)
if (args.compute_cer or args.pseudo_labeling) and args.local_rank == 0:
dataset = args.dataset if not args.tgt_dataset else args.tgt_dataset
dataloaders, _ = data_load.load_data(args.root_path, dataset, args)
splits = ["test", "val"] if not args.pseudo_labeling else ["train"]
for split in splits:
logging.warning(f"---------Recognizing {dataset} {split}----------")
args.result_label = f"{args.outdir}/{dataset}_{split}_recog.json"
if not data_load.data_config[dataset][split]:
split_path = os.path.join(args.root_path, f"{args.root_path}/tmp_dev_set_{dataset}.json")
else:
split_path = data_load.data_config[dataset][split]
args.recog_json = os.path.join(args.root_path, split_path)
idx = ["train", "val", "test"].index(split)
err_dict = recognize_and_evaluate(dataloaders[idx], model, args, model_path=save_path, wer=True, write_to_json=True)
logging.warning(f"CER: {err_dict['cer']['err']}")
logging.warning(f"WER: {err_dict['wer']['err']}") | 19,142 | 45.919118 | 320 | py |
NeuralSpeech | NeuralSpeech-master/SoftCorrect/eval_detector.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import sys
import torch
import argparse
import re
from fairseq.models.transformer import TransformerModel
import os
import os.path
import time
import json
import numpy as np
from fairseq import utils
utils.import_user_module(argparse.Namespace(user_dir='./softcorrect'))
from softcorrect.softcorrect_model import SoftcorrectDetectorModel
from fairseq.tokenizer import tokenize_line
#os.environ["CUDA_VISIBLE_DEVICES"] = '0'
def remove_ch_spaces(input_str):
return re.sub(r"(?<=[\u4e00-\u9fff])(\s+)(?=[\u4e00-\u9fff])", "", input_str.strip())
def word_to_char(text):
text = re.sub(r'([\u4e00-\u9fff])', r'\1 ',text)
text = re.sub('\s+',' ',text)
return text.strip()
def tn_bpe(text):
text = re.sub("(?<=[\u4e00-\u9fff])(\s\▁\s*)(?=[\u4e00-\u9fff])"," ",text)
if text[0] == '▁':
text = " ".join(text.strip('▁').split())
assert len(text) == 2 * len(text.split()) - 1
assert '▁' not in text
return text
model_name_or_path = sys.argv[2]
eval_data = sys.argv[3]
try:
test_epoch = int(sys.argv[4])
checkpoint_file = "checkpoint{}.pt".format(test_epoch)
except:
test_epoch = 'best'
checkpoint_file = "checkpoint_best.pt"
print("test {}/{}".format(model_name_or_path, checkpoint_file))
data_name_or_path = "data/detector_dict"
bpe = "sentencepiece"
sentencepiece_model = "./sentence.bpe.model"
res_dir = os.path.join(model_name_or_path, ( ("results_aishell" )).replace('results', 'results_detector_' + str(test_epoch)))
tmp_dir = os.path.join(model_name_or_path, ( ("tmp_aishell" )).replace('tmp', 'tmp_detector_' + str(test_epoch)))
os.makedirs(res_dir, exist_ok=True)
os.makedirs(tmp_dir, exist_ok=True)
try:
infile_list = sys.argv[1].split(',')
except:
raise ValueError()
print("infile_list:", infile_list)
transf_gec = SoftcorrectDetectorModel.from_pretrained(model_name_or_path, checkpoint_file=checkpoint_file, data_name_or_path=data_name_or_path, bpe=bpe,
sentencepiece_model=sentencepiece_model, arch="softcorrect_detector", task="softcorrect_task", pad_first_dictionary=True)
transf_gec.eval()
transf_gec.cuda()
for infile in infile_list:
input_tsv = os.path.join(eval_data, infile, "aligned_nbest_token_raw.data.json")
all_time = []
eval_origin_dict = json.load(open(input_tsv, 'r', encoding='utf-8'))
translate_input_dict = {}
for k, v in eval_origin_dict["utts"].items():
if ' # ' in v["output"][0]["rec_text"]:
translate_input_dict[k] = (" ".join(v["output"][0]["rec_token"].replace(' ||| ', ' ').strip().split()), v["output"][0]["token"], None)
else:
raise ValueError("Incorrect format")
translated_output_dict = {}
for k, v in translate_input_dict.items():
text = v[0]
gt = v[1]
rec_token_score = v[2]
start_time = time.time()
time_ok = False
try:
text_length = len(text.split())
assert text_length % 4 == 0
force_mask_type = "dec_infer"
text_bin = transf_gec.binarize(text)
batched_hypos = transf_gec.generate(text_bin, iter_decode_max_iter=0, force_mask_type=force_mask_type, nbest_infer=4)
if isinstance(batched_hypos, tuple):
batched_hypos, exm_time = batched_hypos
else:
exm_time = 1000000
detector_probs = [list(map( lambda x: np.exp(float(x)), i)) for i in batched_hypos[0][0]['tokens'][0]][1:-1]
assert len(detector_probs) == text_length / 4
all_time.append(exm_time)
time_ok = True
# translated = translated[0]
translated_output_dict[k] = (text, gt, detector_probs)
#translated_list.append(translated)
except Exception as e:
print(input_tsv + "\t" + text + "\n")
#fout_ex.write(input_tsv + "\t" + text + "\n")
translated_list.append(text)
raise e
continue
end_time = time.time()
if not time_ok:
all_time.append(end_time - start_time)
eval_origin_dict["utts"][k]["output"][0]["detector_prob"] = detector_probs #" ".join(map(str, detector_probs))
os.makedirs(os.path.join(res_dir, input_tsv.split('/')[-2]), exist_ok=True)
if all_time:
with open(os.path.join(res_dir, input_tsv.split('/')[-2], input_tsv.split('/')[-2] + "_time.txt"), 'w') as outfile:
outfile.write("{}\t{}\t{}\n".format(len(all_time), sum(all_time), sum(all_time)/len(all_time)))
json.dump(eval_origin_dict, open(os.path.join(res_dir, input_tsv.split('/')[-2], 'data.json'), 'w', encoding='utf-8'), indent=4, sort_keys=True, ensure_ascii=False)
| 4,812 | 36.897638 | 177 | py |
NeuralSpeech | NeuralSpeech-master/SoftCorrect/eval_corrector.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import sys
import argparse
import torch
import re
import os
import os.path
import time
import json
import numpy as np
from fairseq import utils
utils.import_user_module(argparse.Namespace(user_dir='./softcorrect'))
from softcorrect.softcorrect_model import SoftcorrectCorrectorModel
from fairseq.tokenizer import tokenize_line
#os.environ["CUDA_VISIBLE_DEVICES"] = '0'
def remove_ch_spaces(input_str):
return re.sub(r"(?<=[\u4e00-\u9fff])(\s+)(?=[\u4e00-\u9fff])", "", input_str.strip())
def word_to_char(text):
text = re.sub(r'([\u4e00-\u9fff])', r'\1 ',text)
text = re.sub('\s+',' ',text)
return text.strip()
def tn_bpe(text):
text = re.sub("(?<=[\u4e00-\u9fff])(\s\▁\s*)(?=[\u4e00-\u9fff])"," ",text)
if text[0] == '▁':
text = " ".join(text.strip('▁').split())
assert len(text) == 2 * len(text.split()) - 1
assert '▁' not in text
return text
detector_thre = sys.argv[2]
model_name_or_path = sys.argv[3]
eval_data = sys.argv[4]
try:
test_epoch = int(sys.argv[5])
checkpoint_file = "checkpoint{}.pt".format(test_epoch)
except:
test_epoch = 'best'
checkpoint_file = "checkpoint_best.pt"
try:
duptoken_thre = float(sys.argv[6])
except:
duptoken_thre = -0.43
try:
phone_thre = float(sys.argv[7])
except:
phone_thre = -0.09
if duptoken_thre < phone_thre:
duptoken_first = True
else:
duptoken_first = False
thre1 = min(phone_thre, duptoken_thre)
thre2 = max(phone_thre, duptoken_thre)
#checkpoint_file = "checkpoint_best.pt"
print("test {}/{}".format(model_name_or_path, checkpoint_file))
data_name_or_path = "data/aishell_corrector"
bpe = "sentencepiece"
sentencepiece_model = "./sentence.bpe.model"
res_dir = os.path.join(model_name_or_path, ( ("results_aishell_b")).replace('results', 'results_corrector_' + str(test_epoch) + '_p' + str(duptoken_thre) + '_h' + str(phone_thre)))
tmp_dir = os.path.join(model_name_or_path, ( ("tmp_aishell_b")).replace('tmp', 'tmp_corrector_' + str(test_epoch) + '_p' + str(duptoken_thre) + '_h' + str(phone_thre)))
os.makedirs(res_dir, exist_ok=True)
os.makedirs(tmp_dir, exist_ok=True)
#fout_ex = open(os.path.join(tmp_dir, "exception.log"), "w")
try:
infile_list = sys.argv[1].split(',')
except:
raise ValueError()
print("infile_list:", infile_list)
transf_gec = SoftcorrectCorrectorModel.from_pretrained(model_name_or_path, checkpoint_file=checkpoint_file, data_name_or_path=data_name_or_path, bpe=bpe,
sentencepiece_model=sentencepiece_model, arch="softcorrect_corrector", task="softcorrect_task", pad_first_dictionary=True)
transf_gec.eval()
transf_gec.cuda()
for infile in infile_list:
input_tsv = os.path.join(eval_data, infile, detector_thre, "data.json")
all_time = []
eval_origin_dict = json.load(open(input_tsv, 'r', encoding='utf-8'))
translate_input_dict = {}
for k, v in eval_origin_dict["utts"].items():
translate_input_dict[k] = (v["output"][0]["rec_text"].replace('<eos>', '').strip(), v["output"][0]["token"], v["output"][0]["score"])
translated_output_dict = {}
for k, v in translate_input_dict.items():
text = v[0]
gt = v[1]
rec_token_score = v[2]
rec_token_score = list(map(float, rec_token_score.split(';')))
start_time = time.time()
time_ok = False
try:
text = tn_bpe(transf_gec.apply_bpe(transf_gec.tokenize(word_to_char(text))))
force_mask_type = []
for score in rec_token_score:
if duptoken_first:
if score > thre2:
force_mask_type.append(0)
elif score > thre1:
force_mask_type.append(3)
else:
force_mask_type.append(4)
else:
if score > thre2:
force_mask_type.append(0)
elif score > thre1:
force_mask_type.append(4)
else:
force_mask_type.append(3)
if text.split()[0] == '▁':
assert len(rec_token_score) == len(text.split()) - 1
force_mask_type = [0, 0] + force_mask_type + [0]
else:
assert len(rec_token_score) == len(text.split())
force_mask_type = [0] + force_mask_type + [0]
if sum(force_mask_type) == 0:
translated = "".join(text.split())
exm_time = 0.0
else:
text = transf_gec.binarize(text)
batched_hypos = transf_gec.generate(text, iter_decode_max_iter=0, force_mask_type=force_mask_type, duptoken_error_distribution=[1.0, 0.0])
if isinstance(batched_hypos, tuple):
batched_hypos, exm_time = batched_hypos
else:
exm_time = 10000
translated = [transf_gec.decode(hypos[0]['tokens']) for hypos in batched_hypos][0]
all_time.append(exm_time)
time_ok = True
# translated = translated[0]
translated_output_dict[k] = (text, gt, translated)
#translated_list.append(translated)
except Exception as e:
print(input_tsv + "\t" + text + "\n")
#fout_ex.write(input_tsv + "\t" + text + "\n")
translated_list.append(text)
raise e
continue
end_time = time.time()
if not time_ok:
all_time.append(end_time - start_time)
eval_origin_dict["utts"][k]["output"][0]["rec_text"] = " ".join("".join(translated.split()).replace('▁', ' ').strip().split())
translated_char = [i for i in eval_origin_dict["utts"][k]["output"][0]["rec_text"]]
eval_origin_dict["utts"][k]["output"][0]["rec_token"] = " ".join(translated_char)
os.makedirs(os.path.join(res_dir, input_tsv.split('/')[-3], input_tsv.split('/')[-2]), exist_ok=True)
if all_time:
with open(os.path.join(res_dir, input_tsv.split('/')[-3], input_tsv.split('/')[-2], input_tsv.split('/')[-3] + "_time.txt"), 'w') as outfile:
outfile.write("{}\t{}\t{}\n".format(len(all_time), sum(all_time), sum(all_time)/len(all_time)))
json.dump(eval_origin_dict, open(os.path.join(res_dir, input_tsv.split('/')[-3], input_tsv.split('/')[-2], 'data.json'), 'w', encoding='utf-8'), indent=4, sort_keys=True, ensure_ascii=False)
| 6,563 | 37.840237 | 194 | py |
NeuralSpeech | NeuralSpeech-master/SoftCorrect/sc_utils/hub_utils_fc.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import logging
import os
from typing import Any, Dict, Iterator, List, Tuple
import torch
from fairseq import utils
from fairseq.data import encoders
from torch import nn
import time
logger = logging.getLogger(__name__)
def from_pretrained(
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
archive_map=None,
**kwargs
):
from fairseq import file_utils
import checkpoint_utils_sc
if archive_map is not None:
if model_name_or_path in archive_map:
model_name_or_path = archive_map[model_name_or_path]
if data_name_or_path is not None and data_name_or_path in archive_map:
data_name_or_path = archive_map[data_name_or_path]
# allow archive_map to set default arg_overrides (e.g., tokenizer, bpe)
# for each model
if isinstance(model_name_or_path, dict):
for k, v in model_name_or_path.items():
if k == "checkpoint_file":
checkpoint_file = v
elif (
k != "path"
# only set kwargs that don't already have overrides
and k not in kwargs
):
kwargs[k] = v
model_name_or_path = model_name_or_path["path"]
model_path = file_utils.load_archive_file(model_name_or_path)
# convenience hack for loading data and BPE codes from model archive
if data_name_or_path.startswith("."):
kwargs["data"] = os.path.abspath(os.path.join(model_path, data_name_or_path))
else:
kwargs["data"] = file_utils.load_archive_file(data_name_or_path)
for file, arg in {
"code": "bpe_codes",
"bpecodes": "bpe_codes",
"sentencepiece.bpe.model": "sentencepiece_model",
}.items():
path = os.path.join(model_path, file)
if os.path.exists(path):
kwargs[arg] = path
if "user_dir" in kwargs:
utils.import_user_module(argparse.Namespace(user_dir=kwargs["user_dir"]))
models, args, task = checkpoint_utils_sc.load_model_ensemble_and_task(
[os.path.join(model_path, cpt) for cpt in checkpoint_file.split(os.pathsep)],
arg_overrides=kwargs,
)
return {
"args": args,
"task": task,
"models": models,
}
class GeneratorHubInterface(nn.Module):
"""
PyTorch Hub interface for generating sequences from a pre-trained
translation or language model.
"""
def __init__(self, args, task, models):
super().__init__()
self.args = args
self.task = task
self.models = nn.ModuleList(models)
self.src_dict = task.source_dictionary
self.tgt_dict = task.target_dictionary
# optimize model for generation
for model in self.models:
model.prepare_for_inference_(args)
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
self.align_dict = utils.load_align_dict(getattr(args, "replace_unk", None))
self.tokenizer = encoders.build_tokenizer(args)
self.bpe = encoders.build_bpe(args)
self.max_positions = utils.resolve_max_positions(
self.task.max_positions(), *[model.max_positions() for model in models]
)
# this is useful for determining the device
self.register_buffer("_float_tensor", torch.tensor([0], dtype=torch.float))
@property
def device(self):
return self._float_tensor.device
def translate(
self, sentences: List[str], beam: int = 5, verbose: bool = False, **kwargs
) -> List[str]:
return self.sample(sentences, beam, verbose, **kwargs)
def sample(
self, sentences: List[str], beam: int = 1, verbose: bool = False, **kwargs
) -> List[str]:
if isinstance(sentences, str):
exc_text, exc_time = self.sample([sentences], beam=beam, verbose=verbose, **kwargs)
return exc_text[0], exc_time
tokenized_sentences = [self.encode(sentence) for sentence in sentences]
batched_hypos, exc_time = self.generate(tokenized_sentences, beam, verbose, **kwargs)
return [self.decode(hypos[0]["tokens"]) for hypos in batched_hypos], exc_time
def score(self, sentences: List[str], **kwargs):
if isinstance(sentences, str):
return self.score([sentences], **kwargs)[0]
# NOTE: this doesn't support translation tasks currently
tokenized_sentences = [self.encode(sentence) for sentence in sentences]
return [
hypos[0]
for hypos in self.generate(
tokenized_sentences, score_reference=True, **kwargs
)
]
def generate(
self,
tokenized_sentences: List[torch.LongTensor],
beam: int = 5,
verbose: bool = False,
skip_invalid_size_inputs=False,
inference_step_args=None,
nbest_infer = 0,
force_mask_type=None,
duptoken_error_distribution=None,
**kwargs
) -> List[List[Dict[str, torch.Tensor]]]:
if torch.is_tensor(tokenized_sentences) and tokenized_sentences.dim() == 1:
return self.generate(
tokenized_sentences.unsqueeze(0), beam=beam, verbose=verbose, nbest_infer=nbest_infer, force_mask_type=force_mask_type, duptoken_error_distribution=duptoken_error_distribution, **kwargs
)[0]
# build generator using current args as well as any kwargs
gen_args = copy.copy(self.args)
gen_args.beam = beam
for k, v in kwargs.items():
setattr(gen_args, k, v)
generator = self.task.build_generator(self.models, gen_args)
inference_step_args = inference_step_args or {}
results = []
begin_time_exc = 0.0
for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs, nbest_infer, force_mask_type=force_mask_type, duptoken_error_distribution=duptoken_error_distribution):
batch = utils.apply_to_sample(lambda t: t.to(self.device), batch)
assert begin_time_exc == 0.0
begin_time_exc = time.time()
translations = self.task.inference_step(
generator, self.models, batch, force_mask_type=force_mask_type, duptoken_error_distribution=duptoken_error_distribution, **inference_step_args
)
exc_time = time.time() - begin_time_exc
for id, hypos in zip(batch["id"].tolist(), translations):
results.append((id, hypos))
# sort output to match input order
outputs = [hypos for _, hypos in sorted(results, key=lambda x: x[0])]
if verbose:
def getarg(name, default):
return getattr(gen_args, name, getattr(self.args, name, default))
for source_tokens, target_hypotheses in zip(tokenized_sentences, outputs):
src_str_with_unk = self.string(source_tokens)
logger.info("S\t{}".format(src_str_with_unk))
for hypo in target_hypotheses:
hypo_str = self.decode(hypo["tokens"])
logger.info("H\t{}\t{}".format(hypo["score"], hypo_str))
logger.info(
"P\t{}".format(
" ".join(
map(
lambda x: "{:.4f}".format(x),
hypo["positional_scores"].tolist(),
)
)
)
)
if hypo["alignment"] is not None and getarg(
"print_alignment", False
):
logger.info(
"A\t{}".format(
" ".join(
[
"{}-{}".format(src_idx, tgt_idx)
for src_idx, tgt_idx in hypo["alignment"]
]
)
)
)
return outputs, exc_time
def encode(self, sentence: str) -> torch.LongTensor:
sentence = self.tokenize(sentence)
sentence = self.apply_bpe(sentence)
return self.binarize(sentence)
def decode(self, tokens: torch.LongTensor) -> str:
sentence = self.string(tokens)
sentence = self.remove_bpe(sentence)
return self.detokenize(sentence)
def tokenize(self, sentence: str) -> str:
if self.tokenizer is not None:
sentence = self.tokenizer.encode(sentence)
return sentence
def detokenize(self, sentence: str) -> str:
if self.tokenizer is not None:
sentence = self.tokenizer.decode(sentence)
return sentence
def apply_bpe(self, sentence: str) -> str:
if self.bpe is not None:
sentence = self.bpe.encode(sentence)
return sentence
def remove_bpe(self, sentence: str) -> str:
if self.bpe is not None:
sentence = self.bpe.decode(sentence)
return sentence
def binarize(self, sentence: str) -> torch.LongTensor:
return self.src_dict.encode_line(sentence, add_if_not_exist=False).long()
def string(self, tokens: torch.LongTensor) -> str:
return self.tgt_dict.string(tokens)
def _build_batches(
self, tokens: List[List[int]], skip_invalid_size_inputs: bool, nbest_infer: int, force_mask_type: List[int]=None, duptoken_error_distribution: List[float]=None
) -> Iterator[Dict[str, Any]]:
lengths = torch.LongTensor([t.numel() for t in tokens])
batch_iterator = self.task.get_batch_iterator(
dataset=self.task.build_dataset_for_inference(tokens, lengths, nbest_infer=nbest_infer, force_mask_type=force_mask_type, duptoken_error_distribution=duptoken_error_distribution),
max_tokens=self.args.max_tokens,
max_sentences=self.args.batch_size,
max_positions=self.max_positions,
ignore_invalid_inputs=skip_invalid_size_inputs,
disable_iterator_cache=True,
).next_epoch_itr(shuffle=False)
return batch_iterator
class BPEHubInterface(object):
"""PyTorch Hub interface for Byte-Pair Encoding (BPE)."""
def __init__(self, bpe, **kwargs):
super().__init__()
args = argparse.Namespace(bpe=bpe, **kwargs)
self.bpe = encoders.build_bpe(args)
assert self.bpe is not None
def encode(self, sentence: str) -> str:
return self.bpe.encode(sentence)
def decode(self, sentence: str) -> str:
return self.bpe.decode(sentence)
class TokenizerHubInterface(object):
"""PyTorch Hub interface for tokenization."""
def __init__(self, tokenizer, **kwargs):
super().__init__()
args = argparse.Namespace(tokenizer=tokenizer, **kwargs)
self.tokenizer = encoders.build_tokenizer(args)
assert self.tokenizer is not None
def encode(self, sentence: str) -> str:
return self.tokenizer.encode(sentence)
def decode(self, sentence: str) -> str:
return self.tokenizer.decode(sentence)
| 11,655 | 36.844156 | 201 | py |
NeuralSpeech | NeuralSpeech-master/SoftCorrect/sc_utils/train_sc.py | #!/usr/bin/env python3 -u
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
"""
import argparse
import logging
import math
import os
import random
import sys
import numpy as np
import torch
from fairseq import (
distributed_utils,
options,
quantization_utils,
tasks,
utils,
)
from fairseq.data import iterators
from fairseq.logging import meters, metrics, progress_bar
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
from trainer_sc import Trainer
import checkpoint_utils_sc
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.train")
def main(args):
utils.import_user_module(args)
assert (
args.max_tokens is not None or args.batch_size is not None
), "Must specify batch size either with --max-tokens or --batch-size"
metrics.reset()
np.random.seed(args.seed)
utils.set_torch_seed(args.seed)
if distributed_utils.is_master(args):
checkpoint_utils_sc.verify_checkpoint_directory(args.save_dir)
# Print args
logger.info(args)
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(args)
# Load valid dataset (we load training data below, based on the latest checkpoint)
for valid_sub_split in args.valid_subset.split(","):
task.load_dataset(valid_sub_split, combine=False, epoch=1)
# Build model and criterion
model = task.build_model(args)
criterion = task.build_criterion(args)
logger.info(model)
logger.info("task: {} ({})".format(args.task, task.__class__.__name__))
logger.info("model: {} ({})".format(args.arch, model.__class__.__name__))
logger.info(
"criterion: {} ({})".format(args.criterion, criterion.__class__.__name__)
)
logger.info(
"num. model params: {} (num. trained: {})".format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
)
)
# (optionally) Configure quantization
if args.quantization_config_path is not None:
quantizer = quantization_utils.Quantizer(
config_path=args.quantization_config_path,
max_epoch=args.max_epoch,
max_update=args.max_update,
)
else:
quantizer = None
# Build trainer
if args.model_parallel_size == 1:
trainer = Trainer(args, task, model, criterion, quantizer)
else:
trainer = MegatronTrainer(args, task, model, criterion)
logger.info(
"training on {} devices (GPUs/TPUs)".format(args.distributed_world_size)
)
logger.info(
"max tokens per GPU = {} and max sentences per GPU = {}".format(
args.max_tokens, args.batch_size
)
)
# Load the latest checkpoint if one is available and restore the
# corresponding train iterator
extra_state, epoch_itr = checkpoint_utils_sc.load_checkpoint(
args,
trainer,
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
# Train until the learning rate gets too small
max_epoch = args.max_epoch or math.inf
lr = trainer.get_lr()
train_meter = meters.StopwatchMeter()
train_meter.start()
while lr > args.min_lr and epoch_itr.next_epoch_idx <= max_epoch:
# train for one epoch
valid_losses, should_stop = train(args, trainer, task, epoch_itr)
if should_stop:
break
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
epoch_itr = trainer.get_train_iterator(
epoch_itr.next_epoch_idx,
# sharded data: get train iterator for next epoch
load_dataset=task.has_sharded_data("train"),
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
train_meter.stop()
logger.info("done training in {:.1f} seconds".format(train_meter.sum))
def should_stop_early(args, valid_loss):
# skip check if no validation was done in the current epoch
if valid_loss is None:
return False
if args.patience <= 0:
return False
def is_better(a, b):
return a > b if args.maximize_best_checkpoint_metric else a < b
prev_best = getattr(should_stop_early, "best", None)
if prev_best is None or is_better(valid_loss, prev_best):
should_stop_early.best = valid_loss
should_stop_early.num_runs = 0
return False
else:
should_stop_early.num_runs += 1
if should_stop_early.num_runs >= args.patience:
logger.info(
"early stop since valid performance hasn't improved for last {} runs".format(
args.patience
)
)
return True
else:
return False
@metrics.aggregate("train")
def train(args, trainer, task, epoch_itr):
"""Train the model for one epoch and return validation losses."""
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=args.fix_batches_to_gpus,
shuffle=(epoch_itr.next_epoch_idx > args.curriculum),
)
update_freq = (
args.update_freq[epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(args.update_freq)
else args.update_freq[-1]
)
itr = iterators.GroupedIterator(itr, update_freq)
if getattr(args, "tpu", False):
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
trainer.begin_epoch(epoch_itr.epoch)
valid_losses = [None]
valid_subsets = args.valid_subset.split(",")
should_stop = False
num_updates = trainer.get_num_updates()
for i, samples in enumerate(progress):
with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function(
"train_step-%d" % i
):
log_output = trainer.train_step(samples)
if log_output is not None: # not OOM, overflow, ...
# log mid-epoch stats
num_updates = trainer.get_num_updates()
if num_updates % args.log_interval == 0:
stats = get_training_stats(metrics.get_smoothed_values("train_inner"))
progress.log(stats, tag="train_inner", step=num_updates)
# reset mid-epoch stats after each log interval
# the end-of-epoch stats will still be preserved
metrics.reset_meters("train_inner")
end_of_epoch = not itr.has_next()
valid_losses, should_stop = validate_and_save(
args, trainer, task, epoch_itr, valid_subsets, end_of_epoch
)
if should_stop:
break
# log end-of-epoch stats
logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch))
stats = get_training_stats(metrics.get_smoothed_values("train"))
progress.print(stats, tag="train", step=num_updates)
# reset epoch-level meters
metrics.reset_meters("train")
return valid_losses, should_stop
def validate_and_save(args, trainer, task, epoch_itr, valid_subsets, end_of_epoch):
num_updates = trainer.get_num_updates()
max_update = args.max_update or math.inf
do_save = (
(end_of_epoch and epoch_itr.epoch % args.save_interval == 0)
or num_updates >= max_update
or (
args.save_interval_updates > 0
and num_updates > 0
and num_updates % args.save_interval_updates == 0
and num_updates >= args.validate_after_updates
)
)
do_validate = (
(not end_of_epoch and do_save) # validate during mid-epoch saves
or (end_of_epoch and epoch_itr.epoch % args.validate_interval == 0)
or num_updates >= max_update
or (
args.validate_interval_updates > 0
and num_updates > 0
and num_updates % args.validate_interval_updates == 0
)
) and not args.disable_validation
# Validate
valid_losses = [None]
if do_validate:
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
# Stopping conditions
should_stop = (
should_stop_early(args, valid_losses[0])
or num_updates >= max_update
or (
args.stop_time_hours > 0
and trainer.cumulative_training_time() / (60 * 60) > args.stop_time_hours
)
)
# Save checkpoint
if do_save or should_stop:
logger.info("begin save checkpoint")
checkpoint_utils_sc.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
return valid_losses, should_stop
def get_training_stats(stats):
stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0)
return stats
def validate(args, trainer, task, epoch_itr, subsets):
"""Evaluate the model on the validation set(s) and return the losses."""
if args.fixed_validation_seed is not None:
# set fixed seed for every validation
utils.set_torch_seed(args.fixed_validation_seed)
trainer.begin_valid_epoch(epoch_itr.epoch)
valid_losses = []
for subset in subsets:
logger.info('begin validation on "{}" subset'.format(subset))
# Initialize data iterator
itr = trainer.get_valid_iterator(subset).next_epoch_itr(shuffle=False)
if getattr(args, "tpu", False):
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
prefix=f"valid on '{subset}' subset",
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
# create a new root metrics aggregator so validation metrics
# don't pollute other aggregators (e.g., train meters)
with metrics.aggregate(new_root=True) as agg:
for sample in progress:
trainer.valid_step(sample)
# log validation stats
stats = get_valid_stats(args, trainer, agg.get_smoothed_values())
progress.print(stats, tag=subset, step=trainer.get_num_updates())
valid_losses.append(stats[args.best_checkpoint_metric])
return valid_losses
def get_valid_stats(args, trainer, stats):
stats["num_updates"] = trainer.get_num_updates()
if hasattr(checkpoint_utils_sc.save_checkpoint, "best"):
key = "best_{0}".format(args.best_checkpoint_metric)
best_function = max if args.maximize_best_checkpoint_metric else min
stats[key] = best_function(
checkpoint_utils_sc.save_checkpoint.best, stats[args.best_checkpoint_metric]
)
return stats
def cli_main(modify_parser=None):
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
if args.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(args, main)
else:
distributed_utils.call_main(args, main)
if __name__ == "__main__":
cli_main()
| 12,121 | 32.766017 | 93 | py |
NeuralSpeech | NeuralSpeech-master/SoftCorrect/sc_utils/softcorrect_corrector_generator.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import namedtuple
import numpy as np
import torch
from fairseq import utils
import time
DecoderOut = namedtuple(
"FastCorrectDecoderOut",
["output_tokens", "output_scores", "attn", "step", "max_step", "history", "to_be_edited_pred", "wer_dur_pred"],
)
class SoftcorrectCorrectorGenerator(object):
def __init__(
self,
tgt_dict,
models=None,
eos_penalty=0.0,
max_iter=10,
max_ratio=2,
beam_size=1,
decoding_format=None,
retain_dropout=False,
adaptive=True,
retain_history=False,
reranking=False,
edit_thre=0.0,
print_werdur=False
):
"""
Generates translations based on iterative refinement.
Args:
tgt_dict: target dictionary
eos_penalty: if > 0.0, it penalized early-stopping in decoding
max_iter: maximum number of refinement iterations
max_ratio: generate sequences of maximum length ax, where x is the source length
decoding_format: decoding mode in {'unigram', 'ensemble', 'vote', 'dp', 'bs'}
retain_dropout: retaining dropout in the inference
adaptive: decoding with early stop
"""
self.bos = tgt_dict.bos()
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.vocab_symbols = tgt_dict.symbols
self.eos_penalty = eos_penalty
self.max_iter = max_iter
self.max_ratio = max_ratio
self.beam_size = beam_size
self.reranking = reranking
self.decoding_format = decoding_format
self.retain_dropout = retain_dropout
self.retain_history = retain_history
self.adaptive = adaptive
self.models = models
self.edit_thre = edit_thre
self.print_werdur = print_werdur
def generate_batched_itr(
self,
data_itr,
maxlen_a=None,
maxlen_b=None,
cuda=False,
timer=None,
prefix_size=0,
):
"""Iterate over a batched dataset and yield individual translations.
Args:
maxlen_a/b: generate sequences of maximum length ax + b,
where x is the source sentence length.
cuda: use GPU for generation
timer: StopwatchMeter for timing generations.
"""
for sample in data_itr:
if "net_input" not in sample:
continue
if timer is not None:
timer.start()
with torch.no_grad():
hypos = self.generate(
self.models,
sample,
prefix_tokens=sample["target"][:, :prefix_size]
if prefix_size > 0
else None,
)
if timer is not None:
timer.stop(sample["ntokens"])
for i, id in enumerate(sample["id"]):
# remove padding
src = utils.strip_pad(sample["net_input"]["src_tokens"][i, :], self.pad)
ref = utils.strip_pad(sample["target"][i, :], self.pad)
yield id, src, ref, hypos[i]
@torch.no_grad()
def generate(self, models, sample, prefix_tokens=None, constraints=None, werdur_gt_str="", force_mask_type=None):
if constraints is not None:
raise NotImplementedError(
"Constrained decoding with the IterativeRefinementGenerator is not supported"
)
# TODO: iterative refinement generator does not support ensemble for now.
if not self.retain_dropout:
for model in models:
model.eval()
model, reranker = models[0], None
if self.reranking:
assert len(models) > 1, "Assuming the last checkpoint is the reranker"
assert (
self.beam_size > 1
), "Reranking requires multiple translation for each example"
reranker = models[-1]
models = models[:-1]
if len(models) > 1 and hasattr(model, "enable_ensemble"):
assert model.allow_ensemble, "{} does not support ensembling".format(
model.__class__.__name__
)
model.enable_ensemble(models)
# TODO: better encoder inputs?
src_tokens = sample["net_input"]["src_tokens"]
src_lengths = sample["net_input"]["src_lengths"]
if "mask_type" in sample["net_input"]:
mask_type = sample["net_input"]["mask_type"]
else:
mask_type = None
#print("mask_type:", mask_type)
#print("src_tokens:", src_tokens)
bsz, src_len = src_tokens.size()[:2]
# initialize
# print("before encoder:", time.time())
encoder_out = model.forward_encoder([src_tokens, src_lengths, mask_type])
if mask_type is not None:
if force_mask_type == "dec_infer":
assert len(encoder_out.shape) == 3
if len(src_tokens.shape) == 2:
tgt_logit = torch.gather(encoder_out, dim=-1,
index=sample["net_input"]["target_full"][:, :, None]).squeeze(-1)
tgt_rank = (encoder_out > tgt_logit[:, :, None]).long().sum(-1) + 1
_, tgt_top5 = torch.topk(encoder_out, 5, dim=-1)
tgt_top5 = [[model.encoder.dictionary[int(tgt_top5[0][i][j])] for j in range(5)]for i in range(len(tgt_top5[0]))]
elif len(src_tokens.shape) == 3:
tgt_logit = torch.gather(encoder_out, dim=-1, index=src_tokens)
tgt_rank = (encoder_out > tgt_logit[:, :, 0:1]).long().sum(-1) + 1
_, tgt_top5 = torch.topk(encoder_out, 5, dim=-1)
tgt_top5 = [[model.encoder.dictionary[int(tgt_top5[0][i][j])] for j in range(5)] for i in range(len(tgt_top5[0]))]
else:
raise ValueError("Impossible input shape {}".format(src_tokens.shape))
return [
[
{
'steps': 0,
'tokens': tgt_logit.cpu(),
'positional_scores': None,
'score': tgt_logit.sum(),
'hypo_attn': None,
'alignment': None
}
]
]
hypo_tokens = encoder_out[0].max(-1)[1]
final_tokens = []
assert src_tokens.shape[1] == mask_type.shape[1] == hypo_tokens.shape[0]
assert src_tokens.shape[0] == 1
last_CTC_token = None
for iter_token in range(len(hypo_tokens)):
if int(mask_type[0][iter_token]) in [0, 1, 2]:
final_tokens.append(src_tokens[0][iter_token])
last_CTC_token = None
elif int(mask_type[0][iter_token]) in [3]:
final_tokens.append(hypo_tokens[iter_token])
last_CTC_token = None
else:
assert int(mask_type[0][iter_token]) in [4]
if hypo_tokens[iter_token] != 0 and hypo_tokens[iter_token] != last_CTC_token:
last_CTC_token = hypo_tokens[iter_token]
final_tokens.append(hypo_tokens[iter_token])
elif hypo_tokens[iter_token] == 0:
last_CTC_token = None
finalized = [
[
{
'steps': 0,
'tokens': torch.LongTensor(final_tokens),
'positional_scores': None,
'score': encoder_out[0].max(-1)[0].sum(),
'hypo_attn': None,
'alignment': None
}
]
]
return finalized
else:
raise ValueError("Must have mask type")
def rerank(self, reranker, finalized, encoder_input, beam_size):
def rebuild_batch(finalized):
finalized_tokens = [f[0]["tokens"] for f in finalized]
finalized_maxlen = max(f.size(0) for f in finalized_tokens)
final_output_tokens = (
finalized_tokens[0]
.new_zeros(len(finalized_tokens), finalized_maxlen)
.fill_(self.pad)
)
for i, f in enumerate(finalized_tokens):
final_output_tokens[i, : f.size(0)] = f
return final_output_tokens
final_output_tokens = rebuild_batch(finalized)
final_output_tokens[
:, 0
] = self.eos # autoregressive model assumes starting with EOS
reranker_encoder_out = reranker.encoder(*encoder_input)
length_beam_order = (
utils.new_arange(
final_output_tokens, beam_size, reranker_encoder_out.encoder_out.size(1)
)
.t()
.reshape(-1)
)
reranker_encoder_out = reranker.encoder.reorder_encoder_out(
reranker_encoder_out, length_beam_order
)
reranking_scores = reranker.get_normalized_probs(
reranker.decoder(final_output_tokens[:, :-1], reranker_encoder_out),
True,
None,
)
reranking_scores = reranking_scores.gather(2, final_output_tokens[:, 1:, None])
reranking_masks = final_output_tokens[:, 1:].ne(self.pad)
reranking_scores = (
reranking_scores[:, :, 0].masked_fill_(~reranking_masks, 0).sum(1)
)
reranking_scores = reranking_scores / reranking_masks.sum(1).type_as(
reranking_scores
)
for i in range(len(finalized)):
finalized[i][0]["score"] = reranking_scores[i]
return finalized
| 10,257 | 37.56391 | 134 | py |
NeuralSpeech | NeuralSpeech-master/SoftCorrect/sc_utils/checkpoint_utils_sc.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
import logging
import os
import re
import traceback
from collections import OrderedDict
from typing import Union
import torch
from fairseq.file_io import PathManager
from fairseq.models import FairseqDecoder, FairseqEncoder
from torch.serialization import default_restore_location
import copy
logger = logging.getLogger(__name__)
def save_checkpoint(args, trainer, epoch_itr, val_loss):
from fairseq import distributed_utils, meters
# only one worker should attempt to create the required dir
if args.distributed_rank == 0:
os.makedirs(args.save_dir, exist_ok=True)
prev_best = getattr(save_checkpoint, "best", val_loss)
if val_loss is not None:
best_function = max if args.maximize_best_checkpoint_metric else min
save_checkpoint.best = best_function(val_loss, prev_best)
if args.no_save:
return
trainer.consolidate_optimizer()
if not trainer.is_data_parallel_master:
return
def is_better(a, b):
return a >= b if args.maximize_best_checkpoint_metric else a <= b
write_timer = meters.StopwatchMeter()
write_timer.start()
epoch = epoch_itr.epoch
end_of_epoch = epoch_itr.end_of_epoch()
updates = trainer.get_num_updates()
suffix = getattr(args, "checkpoint_suffix", "")
checkpoint_conds = collections.OrderedDict()
checkpoint_conds["checkpoint{}{}.pt".format(epoch, suffix)] = (
end_of_epoch
and not args.no_epoch_checkpoints
and epoch % args.save_interval == 0
)
checkpoint_conds["checkpoint_{}_{}{}.pt".format(epoch, updates, suffix)] = (
not end_of_epoch
and args.save_interval_updates > 0
and updates % args.save_interval_updates == 0
)
checkpoint_conds["checkpoint_best{}.pt".format(suffix)] = val_loss is not None and (
not hasattr(save_checkpoint, "best")
or is_better(val_loss, save_checkpoint.best)
)
if val_loss is not None and args.keep_best_checkpoints > 0:
checkpoint_conds[
"checkpoint.best_{}_{:.2f}.pt".format(args.best_checkpoint_metric, val_loss)
] = not hasattr(save_checkpoint, "best") or is_better(
val_loss, save_checkpoint.best
)
checkpoint_conds[
"checkpoint_last{}.pt".format(suffix)
] = not args.no_last_checkpoints
extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss}
if hasattr(save_checkpoint, "best"):
extra_state.update({"best": save_checkpoint.best})
checkpoints = [
os.path.join(args.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond
]
if len(checkpoints) > 0:
trainer.save_checkpoint(checkpoints[0], extra_state)
for cp in checkpoints[1:]:
PathManager.copy(checkpoints[0], cp, overwrite=True)
write_timer.stop()
logger.info(
"saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format(
checkpoints[0], epoch, updates, val_loss, write_timer.sum
)
)
if not end_of_epoch and args.keep_interval_updates > 0:
# remove old checkpoints; checkpoints are sorted in descending order
checkpoints = checkpoint_paths(
args.save_dir, pattern=r"checkpoint_\d+_(\d+)\.pt"
)
for old_chk in checkpoints[args.keep_interval_updates :]:
if os.path.lexists(old_chk):
os.remove(old_chk)
if args.keep_last_epochs > 0:
# remove old epoch checkpoints; checkpoints are sorted in descending order
checkpoints = checkpoint_paths(args.save_dir, pattern=r"checkpoint(\d+)\.pt")
for old_chk in checkpoints[args.keep_last_epochs :]:
if os.path.lexists(old_chk):
os.remove(old_chk)
if args.keep_best_checkpoints > 0:
# only keep the best N checkpoints according to validation metric
checkpoints = checkpoint_paths(
args.save_dir,
pattern=r"checkpoint\.best_{}_(\d+\.?\d*)\.pt".format(
args.best_checkpoint_metric
),
)
if not args.maximize_best_checkpoint_metric:
checkpoints = checkpoints[::-1]
for old_chk in checkpoints[args.keep_best_checkpoints :]:
if os.path.lexists(old_chk):
os.remove(old_chk)
def load_checkpoint(args, trainer, **passthrough_args):
"""
Load a checkpoint and restore the training iterator.
*passthrough_args* will be passed through to
``trainer.get_train_iterator``.
"""
reset_optimizer = args.reset_optimizer
reset_lr_scheduler = args.reset_lr_scheduler
optimizer_overrides = eval(args.optimizer_overrides)
reset_meters = args.reset_meters
reset_dataloader = args.reset_dataloader
if getattr(args, "finetune_from_model", None) is not None and (
reset_optimizer or reset_lr_scheduler or reset_meters or reset_dataloader
):
raise ValueError(
"--finetune-from-model can not be set together with either --reset-optimizer"
" or reset_lr_scheduler or reset_meters or reset_dataloader"
)
suffix = getattr(args, "checkpoint_suffix", "")
if (
args.restore_file == "checkpoint_last.pt"
): # default value of restore_file is 'checkpoint_last.pt'
checkpoint_path = os.path.join(
args.save_dir, "checkpoint_last{}.pt".format(suffix)
)
first_launch = not PathManager.exists(checkpoint_path)
if getattr(args, "finetune_from_model", None) is not None and first_launch:
# if there is no last checkpoint to restore, start the finetune from pretrained model
# else just use usual logic to load checkpoint, e.g. restart from last checkpoint and etc.
if PathManager.exists(args.finetune_from_model):
checkpoint_path = args.finetune_from_model
reset_optimizer = True
reset_lr_scheduler = True
reset_meters = True
reset_dataloader = True
logger.info(
f"loading pretrained model from {checkpoint_path}: "
"optimizer, lr scheduler, meters, dataloader will be reset"
)
else:
raise ValueError(
f"--funetune-from-model {args.finetune_from_model} does not exist"
)
elif getattr(args, "model_parallel_size", 1) > 1:
checkpoint_path = args.restore_file.replace(".pt", suffix + ".pt")
else:
checkpoint_path = args.restore_file
if args.restore_file != "checkpoint_last.pt" and getattr(
args, "finetune_from_model", None
):
raise ValueError(
"--finetune-from-model and --restore-file (non-default value) "
"can not be specified together: " + str(args)
)
extra_state = trainer.load_checkpoint(
checkpoint_path,
reset_optimizer,
reset_lr_scheduler,
optimizer_overrides,
reset_meters=reset_meters,
bert_generator_encoder_model_path=args.bert_generator_encoder_model_path,
main_encoder_warmup_path=args.main_encoder_warmup_path,
)
if (
extra_state is not None
and "best" in extra_state
and not reset_optimizer
and not reset_meters
):
save_checkpoint.best = extra_state["best"]
if extra_state is not None and not reset_dataloader:
# restore iterator from checkpoint
itr_state = extra_state["train_iterator"]
epoch_itr = trainer.get_train_iterator(
epoch=itr_state["epoch"], load_dataset=True, **passthrough_args
)
epoch_itr.load_state_dict(itr_state)
else:
epoch_itr = trainer.get_train_iterator(
epoch=1, load_dataset=True, **passthrough_args
)
trainer.lr_step(epoch_itr.epoch)
return extra_state, epoch_itr
def load_checkpoint_to_cpu(path, arg_overrides=None):
"""Loads a checkpoint to CPU (with upgrading for backward compatibility)."""
with open(PathManager.get_local_path(path), "rb") as f:
state = torch.load(
f, map_location=lambda s, l: default_restore_location(s, "cpu")
)
args = state["args"]
if arg_overrides is not None:
for arg_name, arg_val in arg_overrides.items():
setattr(args, arg_name, arg_val)
state = _upgrade_state_dict(state)
return state
def load_model_ensemble(
filenames, arg_overrides=None, task=None, strict=True, suffix="", num_shards=1
):
"""Loads an ensemble of models.
Args:
filenames (List[str]): checkpoint files to load
arg_overrides (Dict[str,Any], optional): override model args that
were used during model training
task (fairseq.tasks.FairseqTask, optional): task to use for loading
"""
assert not (
strict and num_shards > 1
), "Cannot load state dict with strict=True and checkpoint shards > 1"
ensemble, args, _task = load_model_ensemble_and_task(
filenames,
arg_overrides,
task,
strict,
suffix,
num_shards,
)
return ensemble, args
def load_model_ensemble_and_task(
filenames, arg_overrides=None, task=None, strict=True, suffix="", num_shards=1
):
from fairseq import tasks
assert not (
strict and num_shards > 1
), "Cannot load state dict with strict=True and checkpoint shards > 1"
ensemble = []
for filename in filenames:
orig_filename = filename
for shard_idx in range(num_shards):
if num_shards == 1:
filename = filename.replace(".pt", suffix + ".pt")
else:
filename = orig_filename[:-3] + f"_part{shard_idx}.pt"
if not PathManager.exists(filename):
raise IOError("Model file not found: {}".format(filename))
state = load_checkpoint_to_cpu(filename, arg_overrides)
if shard_idx == 0:
args = state["args"]
if task is None:
task = tasks.setup_task(args)
# build model for ensemble
model = task.build_model(args)
model.load_state_dict(state["model"], strict=strict, args=args)
ensemble.append(model)
return ensemble, args, task
def checkpoint_paths(path, pattern=r"checkpoint(\d+)\.pt"):
"""Retrieves all checkpoints found in `path` directory.
Checkpoints are identified by matching filename to the specified pattern. If
the pattern contains groups, the result will be sorted by the first group in
descending order.
"""
pt_regexp = re.compile(pattern)
files = os.listdir(path)
entries = []
for i, f in enumerate(files):
m = pt_regexp.fullmatch(f)
if m is not None:
idx = float(m.group(1)) if len(m.groups()) > 0 else i
entries.append((idx, m.group(0)))
return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)]
def torch_persistent_save(obj, f):
if isinstance(f, str):
with PathManager.open(f, "wb") as h:
torch_persistent_save(obj, h)
return
for i in range(3):
try:
return torch.save(obj, f)
except Exception:
if i == 2:
logger.error(traceback.format_exc())
def save_state(
filename,
args,
model_state_dict,
criterion,
optimizer,
lr_scheduler,
num_updates,
optim_history=None,
extra_state=None,
):
from fairseq import utils
if optim_history is None:
optim_history = []
if extra_state is None:
extra_state = {}
state_dict = {
"args": args,
"model": model_state_dict or {},
"optimizer_history": optim_history
+ [
{
"criterion_name": criterion.__class__.__name__,
"optimizer_name": optimizer.__class__.__name__,
"lr_scheduler_state": lr_scheduler.state_dict(),
"num_updates": num_updates,
}
],
"extra_state": extra_state,
}
if utils.has_parameters(criterion):
state_dict["criterion"] = criterion.state_dict()
if not args.no_save_optimizer_state:
state_dict["last_optimizer_state"] = optimizer.state_dict()
# convert all state to CPU
state_dict = utils.move_to_cpu(state_dict)
with PathManager.open(filename, "wb") as f:
torch_persistent_save(state_dict, f)
def _upgrade_state_dict(state):
"""Helper for upgrading old model checkpoints."""
from fairseq import models, registry, tasks
# add optimizer_history
if "optimizer_history" not in state:
state["optimizer_history"] = [
{"criterion_name": "CrossEntropyCriterion", "best_loss": state["best_loss"]}
]
state["last_optimizer_state"] = state["optimizer"]
del state["optimizer"]
del state["best_loss"]
# move extra_state into sub-dictionary
if "epoch" in state and "extra_state" not in state:
state["extra_state"] = {
"epoch": state["epoch"],
"batch_offset": state["batch_offset"],
"val_loss": state["val_loss"],
}
del state["epoch"]
del state["batch_offset"]
del state["val_loss"]
# reduce optimizer history's memory usage (only keep the last state)
if "optimizer" in state["optimizer_history"][-1]:
state["last_optimizer_state"] = state["optimizer_history"][-1]["optimizer"]
for optim_hist in state["optimizer_history"]:
del optim_hist["optimizer"]
# record the optimizer class name
if "optimizer_name" not in state["optimizer_history"][-1]:
state["optimizer_history"][-1]["optimizer_name"] = "FairseqNAG"
# move best_loss into lr_scheduler_state
if "lr_scheduler_state" not in state["optimizer_history"][-1]:
state["optimizer_history"][-1]["lr_scheduler_state"] = {
"best": state["optimizer_history"][-1]["best_loss"]
}
del state["optimizer_history"][-1]["best_loss"]
# keep track of number of updates
if "num_updates" not in state["optimizer_history"][-1]:
state["optimizer_history"][-1]["num_updates"] = 0
# old model checkpoints may not have separate source/target positions
if hasattr(state["args"], "max_positions") and not hasattr(
state["args"], "max_source_positions"
):
state["args"].max_source_positions = state["args"].max_positions
state["args"].max_target_positions = state["args"].max_positions
# use stateful training data iterator
if "train_iterator" not in state["extra_state"]:
state["extra_state"]["train_iterator"] = {
"epoch": state["extra_state"]["epoch"],
"iterations_in_epoch": state["extra_state"].get("batch_offset", 0),
}
# default to translation task
if not hasattr(state["args"], "task"):
state["args"].task = "translation"
# --raw-text and --lazy-load are deprecated
if getattr(state["args"], "raw_text", False):
state["args"].dataset_impl = "raw"
elif getattr(state["args"], "lazy_load", False):
state["args"].dataset_impl = "lazy"
# epochs start at 1
if state["extra_state"]["train_iterator"] is not None:
state["extra_state"]["train_iterator"]["epoch"] = max(
state["extra_state"]["train_iterator"].get("epoch", 1),
1,
)
# set any missing default values in the task, model or other registries
registry.set_defaults(state["args"], tasks.TASK_REGISTRY[state["args"].task])
registry.set_defaults(state["args"], models.ARCH_MODEL_REGISTRY[state["args"].arch])
for registry_name, REGISTRY in registry.REGISTRIES.items():
choice = getattr(state["args"], registry_name, None)
if choice is not None:
cls = REGISTRY["registry"][choice]
registry.set_defaults(state["args"], cls)
return state
def prune_state_dict(state_dict, args):
"""Prune the given state_dict if desired for LayerDrop
(https://arxiv.org/abs/1909.11556).
Training with LayerDrop allows models to be robust to pruning at inference
time. This function prunes state_dict to allow smaller models to be loaded
from a larger model and re-maps the existing state_dict for this to occur.
It's called by functions that load models from checkpoints and does not
need to be called directly.
"""
if not args or args.arch == "ptt_transformer":
# args should not be none, but don't crash if it is.
return state_dict
encoder_layers_to_keep = (
args.encoder_layers_to_keep if "encoder_layers_to_keep" in vars(args) else None
)
decoder_layers_to_keep = (
args.decoder_layers_to_keep if "decoder_layers_to_keep" in vars(args) else None
)
if not encoder_layers_to_keep and not decoder_layers_to_keep:
return state_dict
# apply pruning
logger.info(
"Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop"
)
def create_pruning_pass(layers_to_keep, layer_name):
keep_layers = sorted(
[int(layer_string) for layer_string in layers_to_keep.split(",")]
)
mapping_dict = {}
for i in range(len(keep_layers)):
mapping_dict[str(keep_layers[i])] = str(i)
regex = re.compile(r"^{layer}.*\.layers\.(\d+)".format(layer=layer_name))
return {"substitution_regex": regex, "mapping_dict": mapping_dict}
pruning_passes = []
if encoder_layers_to_keep:
pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, "encoder"))
if decoder_layers_to_keep:
pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, "decoder"))
new_state_dict = {}
for layer_name in state_dict.keys():
match = re.search(r"\.layers\.(\d+)\.", layer_name)
# if layer has no number in it, it is a supporting layer, such as an
# embedding
if not match:
new_state_dict[layer_name] = state_dict[layer_name]
continue
# otherwise, layer should be pruned.
original_layer_number = match.group(1)
# figure out which mapping dict to replace from
for pruning_pass in pruning_passes:
if original_layer_number in pruning_pass["mapping_dict"] and pruning_pass[
"substitution_regex"
].search(layer_name):
new_layer_number = pruning_pass["mapping_dict"][original_layer_number]
substitution_match = pruning_pass["substitution_regex"].search(
layer_name
)
new_state_key = (
layer_name[: substitution_match.start(1)]
+ new_layer_number
+ layer_name[substitution_match.end(1) :]
)
new_state_dict[new_state_key] = state_dict[layer_name]
# Since layers are now pruned, *_layers_to_keep are no longer needed.
# This is more of "It would make it work fix" rather than a proper fix.
if "encoder_layers_to_keep" in vars(args):
args.encoder_layers_to_keep = None
if "decoder_layers_to_keep" in vars(args):
args.decoder_layers_to_keep = None
return new_state_dict
def load_pretrained_component_from_model(
component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str
):
"""
Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the
provided `component` object. If state_dict fails to load, there may be a
mismatch in the architecture of the corresponding `component` found in the
`checkpoint` file.
"""
if not PathManager.exists(checkpoint):
raise IOError("Model file not found: {}".format(checkpoint))
state = load_checkpoint_to_cpu(checkpoint)
if isinstance(component, FairseqEncoder):
component_type = "encoder"
elif isinstance(component, FairseqDecoder):
component_type = "decoder"
else:
raise ValueError(
"component to load must be either a FairseqEncoder or "
"FairseqDecoder. Loading other component types are not supported."
)
component_state_dict = OrderedDict()
for key in state["model"].keys():
if key.startswith(component_type):
# encoder.input_layers.0.0.weight --> input_layers.0.0.weight
component_subkey = key[len(component_type) + 1 :]
component_state_dict[component_subkey] = state["model"][key]
component.load_state_dict(component_state_dict, strict=True)
return component
def verify_checkpoint_directory(save_dir: str) -> None:
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
temp_file_path = os.path.join(save_dir, "dummy")
try:
with open(temp_file_path, "w"):
pass
except OSError as e:
logger.warning(
"Unable to access checkpoint save directory: {}".format(save_dir)
)
raise e
else:
os.remove(temp_file_path)
| 21,532 | 36.125862 | 114 | py |
NeuralSpeech | NeuralSpeech-master/SoftCorrect/sc_utils/binarizer_fc.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from collections import Counter
import torch
from fairseq.file_io import PathManager
from fairseq.tokenizer import tokenize_line
def safe_readline(f):
pos = f.tell()
while True:
try:
return f.readline()
except UnicodeDecodeError:
pos -= 1
f.seek(pos) # search where this character begins
class Binarizer:
@staticmethod
def binarize(
filename,
dict,
consumer,
tokenize=tokenize_line,
append_eos=True,
reverse_order=False,
offset=0,
end=-1,
already_numberized=False,
src_with_werdur=False,
):
nseq, ntok = 0, 0
replaced = Counter()
def replaced_consumer(word, idx):
if idx == dict.unk_index and word != dict.unk_word:
replaced.update([word])
with open(PathManager.get_local_path(filename), "r", encoding="utf-8") as f:
f.seek(offset)
# next(f) breaks f.tell(), hence readline() must be used
line = safe_readline(f)
while line:
if end > 0 and f.tell() > end:
break
if already_numberized:
assert ' |||| ' not in line, "This constraint is add when doing asr correction exp"
id_strings = line.strip().split()
id_list = [int(id_string) for id_string in id_strings]
if reverse_order:
id_list.reverse()
if append_eos:
id_list.append(dict.eos())
ids = torch.IntTensor(id_list)
else:
if ' |||| ' in line:
assert src_with_werdur
line, werdur_info = line.split(' |||| ')
werdur_list = []
for i in werdur_info.strip().split():
assert abs(int(i)) < 30000
werdur_list.append(int(i) + 32768)
if append_eos:
werdur_list.append(1 + 32768)
werdur_list_length = len(werdur_list)
else:
werdur_list = None
ids = dict.encode_line(
line=line,
line_tokenizer=tokenize,
add_if_not_exist=False,
consumer=replaced_consumer,
append_eos=append_eos,
reverse_order=reverse_order,
)
# print(ids)
if werdur_list is not None:
assert werdur_list_length == len(ids)
ids = torch.cat([ids, torch.IntTensor(werdur_list)], dim=-1)
nseq += 1
ntok += len(ids)
consumer(ids)
line = f.readline()
return {
"nseq": nseq,
"nunk": sum(replaced.values()),
"ntok": ntok,
"replaced": replaced,
}
@staticmethod
def binarize_alignments(filename, alignment_parser, consumer, offset=0, end=-1):
nseq = 0
with open(PathManager.get_local_path(filename), "r") as f:
f.seek(offset)
line = safe_readline(f)
while line:
if end > 0 and f.tell() > end:
break
ids = alignment_parser(line)
nseq += 1
consumer(ids)
line = f.readline()
return {"nseq": nseq}
@staticmethod
def find_offsets(filename, num_chunks):
with open(PathManager.get_local_path(filename), "r", encoding="utf-8") as f:
size = os.fstat(f.fileno()).st_size
chunk_size = size // num_chunks
offsets = [0 for _ in range(num_chunks + 1)]
for i in range(1, num_chunks):
f.seek(chunk_size * i)
safe_readline(f)
offsets[i] = f.tell()
return offsets
| 4,394 | 33.606299 | 103 | py |
NeuralSpeech | NeuralSpeech-master/SoftCorrect/sc_utils/trainer_sc.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a network across multiple GPUs.
"""
import contextlib
import logging
import sys
import time
from itertools import chain
from typing import Any, Dict, List
import torch
from fairseq import checkpoint_utils, distributed_utils, models, optim, utils
from fairseq.file_io import PathManager
from fairseq.logging import meters, metrics
from fairseq.nan_detector import NanDetector
from fairseq.optim import lr_scheduler
import copy
logger = logging.getLogger(__name__)
class Trainer(object):
"""Main class for data parallel training.
This class supports synchronous distributed data parallel training,
where multiple workers each have a full model replica and gradients
are accumulated across workers before each update. We use
:class:`~torch.nn.parallel.DistributedDataParallel` to handle
communication of the gradients across workers.
"""
def __init__(self, args, task, model, criterion, quantizer=None):
self.args = args
self.task = task
# catalog shared parameters
shared_params = _catalog_shared_params(model)
self.tpu = getattr(args, "tpu", False)
self.cuda = torch.cuda.is_available() and not args.cpu and not self.tpu
if self.cuda:
self.device = torch.device("cuda")
elif self.tpu:
self.device = utils.get_tpu_device(args)
else:
self.device = torch.device("cpu")
# copy model and criterion to current device/dtype
self._criterion = criterion
self._model = model
if self.tpu:
import torch_xla.core.xla_model as xm
self._model = xm.send_cpu_data_to_device(self._model, self.device)
if args.fp16:
self._criterion = self._criterion.half()
self._model = self._model.half()
elif args.bf16:
self._criterion = self._criterion.to(dtype=torch.bfloat16)
self._model = self._model.to(dtype=torch.bfloat16)
if not args.pipeline_model_parallel:
self._criterion = self._criterion.to(device=self.device)
self._model = self._model.to(device=self.device)
self.pipeline_model_parallel = args.pipeline_model_parallel
self.last_device = None
if self.cuda and self.pipeline_model_parallel:
self.last_device = torch.device(args.pipeline_devices[-1])
# check that shared parameters are preserved after device transfer
for shared_param in shared_params:
ref = _get_module_by_path(self._model, shared_param[0])
for path in shared_param[1:]:
logger.info(
"detected shared parameter: {} <- {}".format(shared_param[0], path)
)
_set_module_by_path(self._model, path, ref)
self._dummy_batch = None # indicates we don't have a dummy batch at first
self._lr_scheduler = None
self._num_updates = 0
self._num_xla_compiles = 0 # for TPUs
self._optim_history = None
self._optimizer = None
self._warn_once = set()
self._wrapped_criterion = None
self._wrapped_model = None
# TODO(myleott): support tpu
if self.cuda and self.data_parallel_world_size > 1:
self._grad_norm_buf = torch.cuda.DoubleTensor(self.data_parallel_world_size)
else:
self._grad_norm_buf = None
self.quantizer = quantizer
if self.quantizer is not None:
self.quantizer.set_trainer(self)
# get detailed cuda environment
if self.cuda:
self.cuda_env = utils.CudaEnvironment()
if self.data_parallel_world_size > 1:
self.cuda_env_arr = distributed_utils.all_gather_list(self.cuda_env)
else:
self.cuda_env_arr = [self.cuda_env]
if self.data_parallel_rank == 0:
utils.CudaEnvironment.pretty_print_cuda_env_list(self.cuda_env_arr)
else:
self.cuda_env = None
self.cuda_env_arr = None
metrics.log_start_time("wall", priority=790, round=0)
self._start_time = time.time()
self._previous_training_time = 0
self._cumulative_training_time = None
def reinitialize(self):
"""Reinitialize the Trainer, typically after model params change."""
self._lr_scheduler = None
self._optimizer = None
self._wrapped_criterion = None
self._wrapped_model = None
@property
def data_parallel_world_size(self):
return self.args.distributed_world_size
@property
def data_parallel_process_group(self):
if self.tpu:
return ("tpu", None)
else:
return None
@property
def data_parallel_rank(self):
return self.args.distributed_rank
@property
def is_data_parallel_master(self):
return distributed_utils.is_master(self.args)
@property
def criterion(self):
if self._wrapped_criterion is None:
if (
utils.has_parameters(self._criterion)
and self.data_parallel_world_size > 1
and not self.args.use_bmuf
and not self.tpu
):
self._wrapped_criterion = models.DistributedFairseqModel(
self.args,
self._criterion,
process_group=self.data_parallel_process_group,
)
else:
self._wrapped_criterion = self._criterion
return self._wrapped_criterion
@property
def model(self):
if self._wrapped_model is None:
if (
self.data_parallel_world_size > 1
and not self.args.use_bmuf
and not self.tpu
):
self._wrapped_model = models.DistributedFairseqModel(
self.args,
self._model,
process_group=self.data_parallel_process_group,
)
else:
self._wrapped_model = self._model
return self._wrapped_model
@property
def optimizer(self):
if self._optimizer is None:
self._build_optimizer()
return self._optimizer
@property
def lr_scheduler(self):
if self._lr_scheduler is None:
self._build_optimizer() # this will initialize self._lr_scheduler
return self._lr_scheduler
def _build_optimizer(self):
params = list(
filter(
lambda p: p.requires_grad,
chain(self.model.parameters(), self.criterion.parameters()),
)
)
if self.args.fp16 or self.args.bf16:
if self.cuda and torch.cuda.get_device_capability(0)[0] < 7:
logger.info(
"NOTE: your device does NOT support faster training with --fp16, "
"please switch to FP32 which is likely to be faster"
)
if self.args.memory_efficient_fp16 or self.args.memory_efficient_bf16:
self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(
self.args, params
)
else:
self._optimizer = optim.FP16Optimizer.build_optimizer(self.args, params)
else:
if self.cuda and torch.cuda.get_device_capability(0)[0] >= 7:
logger.info("NOTE: your device may support faster training with --fp16")
self._optimizer = optim.build_optimizer(self.args, params)
if self.args.use_bmuf:
self._optimizer = optim.FairseqBMUF(self.args, self._optimizer)
if self.args.zero_sharding == "os":
if (
self.args.fp16
and not self.args.memory_efficient_fp16
and not self.args.memory_efficient_bf16
) and not self.args.fp16_no_flatten_grads:
raise ValueError(
"ZeRO is incomptabile with fp16 and flattened grads. "
"Please use --fp16-no-flatten-grads"
)
else:
optim.shard_(
self.args, self._optimizer, self.data_parallel_process_group
)
# We should initialize the learning rate scheduler immediately after
# building the optimizer, so that the initial learning rate is set.
self._lr_scheduler = lr_scheduler.build_lr_scheduler(self.args, self.optimizer)
self._lr_scheduler.step_update(0)
def consolidate_optimizer(self):
"""For OSS, we need to consolidate the state dict."""
if hasattr(self.optimizer.optimizer, "consolidate_state_dict"):
self.optimizer.optimizer.consolidate_state_dict()
def save_checkpoint(self, filename, extra_state):
"""Save all training state in a checkpoint file."""
if self.is_data_parallel_master: # only save one checkpoint
extra_state["metrics"] = metrics.state_dict()
extra_state["previous_training_time"] = self.cumulative_training_time()
checkpoint_utils.save_state(
filename,
self.args,
self.get_model().state_dict(),
self.get_criterion(),
self.optimizer,
self.lr_scheduler,
self.get_num_updates(),
self._optim_history,
extra_state,
)
def load_checkpoint(
self,
filename,
reset_optimizer=False,
reset_lr_scheduler=False,
optimizer_overrides=None,
reset_meters=False,
bert_generator_encoder_model_path="",
main_encoder_warmup_path="",
):
"""Load all training state from a checkpoint file."""
extra_state, self._optim_history, last_optim_state = None, [], None
bexists = PathManager.isfile(filename)
if bexists:
state = checkpoint_utils.load_checkpoint_to_cpu(filename)
# load model parameters
try:
self.get_model().load_state_dict(
state["model"], strict=True, args=self.args
)
if utils.has_parameters(self.get_criterion()):
self.get_criterion().load_state_dict(
state["criterion"], strict=True
)
except Exception:
raise Exception(
"Cannot load model parameters from checkpoint {}; "
"please ensure that the architectures match.".format(filename)
)
extra_state = state["extra_state"]
self._optim_history = state["optimizer_history"]
last_optim_state = state.get("last_optimizer_state", None)
if last_optim_state is not None and not reset_optimizer:
# rebuild optimizer after loading model, since params may have changed
self._build_optimizer()
# only reload optimizer and lr_scheduler if they match
last_optim = self._optim_history[-1]
assert (
last_optim["criterion_name"] == self.get_criterion().__class__.__name__
), "Criterion does not match; please reset the optimizer (--reset-optimizer)."
assert (
last_optim["optimizer_name"] == self.optimizer.__class__.__name__
), "Optimizer does not match; please reset the optimizer (--reset-optimizer)."
if not reset_lr_scheduler:
self.lr_scheduler.load_state_dict(last_optim["lr_scheduler_state"])
self.optimizer.load_state_dict(last_optim_state, optimizer_overrides)
self.set_num_updates(last_optim["num_updates"])
if extra_state is not None:
epoch = extra_state["train_iterator"]["epoch"]
logger.info(
"loaded checkpoint {} (epoch {} @ {} updates)".format(
filename, epoch, self.get_num_updates()
)
)
if "previous_training_time" in extra_state:
self._previous_training_time = extra_state["previous_training_time"]
self._start_time = time.time()
self.lr_step(epoch)
if "metrics" in extra_state and not reset_meters:
metrics.load_state_dict(extra_state["metrics"])
# reset TimeMeters, since their start times don't make sense anymore
for meter in metrics.get_meters("default"):
if isinstance(meter, meters.TimeMeter):
meter.reset()
else:
logger.info("no existing checkpoint found {}".format(filename))
if bert_generator_encoder_model_path:
logger.info("Loading bert_generator encoder from {}".format(bert_generator_encoder_model_path))
bert_generator_state = checkpoint_utils.load_checkpoint_to_cpu(bert_generator_encoder_model_path)
for key in list(bert_generator_state["model"].keys()):
if key.startswith("encoder"):
bert_generator_state["model"][key.replace("encoder", "bert_generator_encoder", 1)] = copy.deepcopy(bert_generator_state["model"][key])
del bert_generator_state["model"][key]
elif key.startswith("output_projection"):
bert_generator_state["model"][key.replace("output_projection", "bert_generator_output_projection", 1)] = copy.deepcopy(bert_generator_state["model"][key])
del bert_generator_state["model"][key]
elif key.startswith("detector_projection"):
del bert_generator_state["model"][key]
else:
raise ValueError("BERT generator should not have {} in model".format(key))
# load model parameters
try:
missing_keys, unexpected_keys = self.get_model().load_state_dict(
bert_generator_state["model"], strict=False, args=self.args
)
assert len(unexpected_keys) == 0, unexpected_keys
except Exception:
raise Exception(
"Cannot load model parameters from checkpoint {}; "
"please ensure that the architectures match.".format(filename)
)
if main_encoder_warmup_path:
logger.info("Loading warmup main encoder from {}".format(main_encoder_warmup_path))
warmup_state = checkpoint_utils.load_checkpoint_to_cpu(main_encoder_warmup_path)
bert_generator_found = False
for key in list(warmup_state["model"].keys()):
if key.startswith("encoder"):
pass
# warmup_state["model"][key.replace("encoder", "bert_generator_encoder", 1)] = copy.deepcopy(warmup_state["model"][key])
# del warmup_state["model"][key]
elif key.startswith("output_projection"):
# warmup_state["model"][key.replace("output_projection", "bert_generator_output_projection", 1)] = copy.deepcopy(warmup_state["model"][key])
del warmup_state["model"][key]
# pass
elif key.startswith("detector_projection"):
del warmup_state["model"][key]
elif key.startswith("bert_generator"):
del warmup_state["model"][key]
bert_generator_found = True
else:
raise ValueError("Warmup encoder should not have {} in model".format(key))
if bert_generator_found:
logger.info("bert_generator modules found in warmup main encoder, dropped!")
# load model parameters
try:
missing_keys, unexpected_keys = self.get_model().load_state_dict(
warmup_state["model"], strict=False, args=self.args
)
assert len(unexpected_keys) == 0, unexpected_keys
except Exception:
raise Exception(
"Cannot load model parameters from checkpoint {}; "
"please ensure that the architectures match.".format(filename)
)
return extra_state
def get_train_iterator(
self,
epoch,
combine=True,
load_dataset=True,
data_selector=None,
shard_batch_itr=True,
disable_iterator_cache=False,
):
"""Return an EpochBatchIterator over the training set for a given epoch."""
if load_dataset:
logger.info("loading train data for epoch {}".format(epoch))
self.task.load_dataset(
self.args.train_subset,
epoch=epoch,
combine=combine,
data_selector=data_selector,
)
batch_iterator = self.task.get_batch_iterator(
dataset=self.task.dataset(self.args.train_subset),
max_tokens=self.args.max_tokens,
max_sentences=self.args.batch_size,
max_positions=utils.resolve_max_positions(
self.task.max_positions(),
self.model.max_positions(),
self.args.max_tokens,
),
ignore_invalid_inputs=True,
required_batch_size_multiple=self.args.required_batch_size_multiple,
seed=self.args.seed,
num_shards=self.data_parallel_world_size if shard_batch_itr else 1,
shard_id=self.data_parallel_rank if shard_batch_itr else 0,
num_workers=self.args.num_workers,
epoch=epoch,
data_buffer_size=self.args.data_buffer_size,
disable_iterator_cache=disable_iterator_cache,
)
self.reset_dummy_batch(batch_iterator.first_batch)
return batch_iterator
def get_valid_iterator(
self,
subset,
disable_iterator_cache=False,
):
"""Return an EpochBatchIterator over given validation subset for a given epoch."""
batch_iterator = self.task.get_batch_iterator(
dataset=self.task.dataset(subset),
max_tokens=self.args.max_tokens_valid,
max_sentences=self.args.batch_size_valid,
max_positions=utils.resolve_max_positions(
self.task.max_positions(),
self.model.max_positions(),
),
ignore_invalid_inputs=self.args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=self.args.required_batch_size_multiple,
seed=self.args.seed,
num_shards=self.data_parallel_world_size,
shard_id=self.data_parallel_rank,
num_workers=self.args.num_workers,
data_buffer_size=self.args.data_buffer_size,
disable_iterator_cache=disable_iterator_cache,
)
self.reset_dummy_batch(batch_iterator.first_batch)
return batch_iterator
def begin_epoch(self, epoch):
"""Called at the beginning of each epoch."""
logger.info("begin training epoch {}".format(epoch))
self.lr_step_begin_epoch(epoch)
if self.quantizer is not None:
self.quantizer.begin_epoch(epoch)
# task specific setup per epoch
self.task.begin_epoch(epoch, self.get_model())
if self.tpu:
import torch_xla.core.xla_model as xm
xm.rendezvous("begin_epoch") # wait for all workers
xm.mark_step()
def begin_valid_epoch(self, epoch):
"""Called at the beginning of each validation epoch."""
# task specific setup per validation epoch
self.task.begin_valid_epoch(epoch, self.get_model())
def reset_dummy_batch(self, batch):
self._dummy_batch = batch
@metrics.aggregate("train")
def train_step(self, samples, raise_oom=False):
"""Do forward, backward and parameter update."""
self._set_seed()
self.model.train()
self.criterion.train()
self.zero_grad()
metrics.log_start_time("train_wall", priority=800, round=0)
# forward and backward pass
logging_outputs, sample_size, ooms = [], 0, 0
for i, sample in enumerate(samples):
sample = self._prepare_sample(sample)
if sample is None:
# when sample is None, run forward/backward on a dummy batch
# and ignore the resulting gradients
sample = self._prepare_sample(self._dummy_batch)
is_dummy_batch = True
else:
if self._dummy_batch == "DUMMY":
self._dummy_batch = sample
is_dummy_batch = False
def maybe_no_sync():
"""
Whenever *samples* contains more than one mini-batch, we
want to accumulate gradients locally and only call
all-reduce in the last backwards pass.
"""
if (
self.data_parallel_world_size > 1
and hasattr(self.model, "no_sync")
and i < len(samples) - 1
):
return self.model.no_sync()
else:
return contextlib.ExitStack() # dummy contextmanager
try:
with maybe_no_sync():
# forward and backward
loss, sample_size_i, logging_output = self.task.train_step(
sample=sample,
model=self.model,
criterion=self.criterion,
optimizer=self.optimizer,
update_num=self.get_num_updates(),
ignore_grad=is_dummy_batch,
)
del loss
logging_outputs.append(logging_output)
sample_size += sample_size_i
# emptying the CUDA cache after the first step can
# reduce the chance of OOM
if self.cuda and self.get_num_updates() == 0:
torch.cuda.empty_cache()
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
if raise_oom:
raise e
logger.warning(
"attempting to recover from OOM in forward/backward pass"
)
ooms += 1
self.zero_grad()
if self.cuda:
torch.cuda.empty_cache()
if self.args.distributed_world_size == 1:
return None
else:
raise e
if self.tpu and i < len(samples) - 1:
# tpu-comment: every XLA operation before marking step is
# appended to the IR graph, and processing too many batches
# before marking step can lead to OOM errors.
# To handle gradient accumulation use case, we explicitly
# mark step here for every forward pass without a backward pass
import torch_xla.core.xla_model as xm
xm.mark_step()
if is_dummy_batch:
if torch.is_tensor(sample_size):
sample_size.zero_()
else:
sample_size *= 0.0
if torch.is_tensor(sample_size):
sample_size = sample_size.float()
else:
sample_size = float(sample_size)
# gather logging outputs from all replicas
if self._sync_stats():
train_time = self._local_cumulative_training_time()
logging_outputs, (
sample_size,
ooms,
total_train_time,
) = self._aggregate_logging_outputs(
logging_outputs,
sample_size,
ooms,
train_time,
ignore=is_dummy_batch,
)
self._cumulative_training_time = (
total_train_time / self.data_parallel_world_size
)
if hasattr(self.model, "all_reduce"):
self.model.all_reduce()
overflow = False
try:
if self.tpu and self.data_parallel_world_size > 1:
import torch_xla.core.xla_model as xm
gradients = xm._fetch_gradients(self.optimizer.optimizer)
xm.all_reduce(
"sum", gradients, scale=1.0 / self.data_parallel_world_size
)
with torch.autograd.profiler.record_function("multiply-grads"):
# multiply gradients by (# GPUs / sample_size) since DDP
# already normalizes by the number of GPUs. Thus we get
# (sum_of_gradients / sample_size).
if not self.args.use_bmuf:
self.optimizer.multiply_grads(
self.data_parallel_world_size / sample_size
)
elif sample_size > 0: # BMUF needs to check sample size
num = self.data_parallel_world_size if self._sync_stats() else 1
self.optimizer.multiply_grads(num / sample_size)
with torch.autograd.profiler.record_function("clip-grads"):
# clip grads
grad_norm = self.clip_grad_norm(self.args.clip_norm)
# check that grad norms are consistent across workers
# on tpu check tensor is slow
if not self.tpu:
if (
not self.args.use_bmuf
and self.args.distributed_wrapper != "SlowMo"
):
self._check_grad_norms(grad_norm)
if not torch.isfinite(grad_norm).all():
# check local gradnorm single GPU case, trigger NanDetector
raise FloatingPointError("gradients are Nan/Inf")
with torch.autograd.profiler.record_function("optimizer"):
# take an optimization step
self.optimizer.step()
except FloatingPointError:
# re-run the forward and backward pass with hooks attached to print
# out where it fails
with NanDetector(self.get_model()):
self.task.train_step(
sample,
self.model,
self.criterion,
self.optimizer,
self.get_num_updates(),
ignore_grad=False,
)
raise
except OverflowError as e:
overflow = True
logger.info("NOTE: overflow detected, " + str(e))
grad_norm = torch.tensor(0.0).cuda()
self.zero_grad()
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
logger.error("OOM during optimization, irrecoverable")
raise e
# Some distributed wrappers (e.g., SlowMo) need access to the optimizer after the step
if hasattr(self.model, "perform_additional_optimizer_actions"):
if hasattr(self.optimizer, "fp32_params"):
self.model.perform_additional_optimizer_actions(
self.optimizer.optimizer, self.optimizer.fp32_params
)
else:
self.model.perform_additional_optimizer_actions(
self.optimizer.optimizer
)
if not overflow or self.args.distributed_wrapper == "SlowMo":
self.set_num_updates(self.get_num_updates() + 1)
if self.tpu:
# mark step on TPUs
import torch_xla.core.xla_model as xm
xm.mark_step()
# only log stats every log_interval steps
# this causes wps to be misreported when log_interval > 1
logging_output = {}
if self.get_num_updates() % self.args.log_interval == 0:
# log memory usage
mem_info = xm.get_memory_info(self.device)
gb_free = mem_info["kb_free"] / 1024 / 1024
gb_total = mem_info["kb_total"] / 1024 / 1024
metrics.log_scalar(
"gb_free",
gb_free,
priority=1500,
round=1,
weight=0,
)
metrics.log_scalar(
"gb_total",
gb_total,
priority=1600,
round=1,
weight=0,
)
logging_output = self._reduce_and_log_stats(
logging_outputs,
sample_size,
grad_norm,
)
# log whenever there's an XLA compilation, since these
# slow down training and may indicate opportunities for
# optimization
self._check_xla_compilation()
else:
# log stats
logging_output = self._reduce_and_log_stats(
logging_outputs,
sample_size,
grad_norm,
)
# clear CUDA cache to reduce memory fragmentation
if (
self.cuda
and self.args.empty_cache_freq > 0
and (
(self.get_num_updates() + self.args.empty_cache_freq - 1)
% self.args.empty_cache_freq
)
== 0
):
torch.cuda.empty_cache()
if self.args.fp16:
metrics.log_scalar(
"loss_scale",
self.optimizer.scaler.loss_scale,
priority=700,
round=4,
weight=0,
)
metrics.log_stop_time("train_wall")
return logging_output
@metrics.aggregate("valid")
def valid_step(self, sample, raise_oom=False):
"""Do forward pass in evaluation mode."""
if self.tpu:
import torch_xla.core.xla_model as xm
xm.rendezvous("valid_step") # wait for all workers
xm.mark_step()
with torch.no_grad():
self.model.eval()
self.criterion.eval()
sample = self._prepare_sample(sample)
if sample is None:
sample = self._prepare_sample(self._dummy_batch)
is_dummy_batch = True
else:
if self._dummy_batch == "DUMMY":
self._dummy_batch = sample
is_dummy_batch = False
try:
_loss, sample_size, logging_output = self.task.valid_step(
sample, self.model, self.criterion
)
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
if not raise_oom:
logger.warning(
"ran out of memory in validation step, retrying batch"
)
for p in self.model.parameters():
if p.grad is not None:
p.grad = None # free some memory
if self.cuda:
torch.cuda.empty_cache()
return self.valid_step(sample, raise_oom=True)
raise e
logging_outputs = [logging_output]
if is_dummy_batch:
if torch.is_tensor(sample_size):
sample_size.zero_()
else:
sample_size *= 0.0
# gather logging outputs from all replicas
if self.data_parallel_world_size > 1:
logging_outputs, (sample_size,) = self._aggregate_logging_outputs(
logging_outputs,
sample_size,
ignore=is_dummy_batch,
)
# log validation stats
logging_output = self._reduce_and_log_stats(logging_outputs, sample_size)
return logging_output
def zero_grad(self):
self.optimizer.zero_grad()
def lr_step_begin_epoch(self, epoch):
"""Adjust the learning rate at the beginning of the epoch."""
self.lr_scheduler.step_begin_epoch(epoch)
# prefer updating the LR based on the number of steps
return self.lr_step_update()
def lr_step(self, epoch, val_loss=None):
"""Adjust the learning rate at the end of the epoch."""
self.lr_scheduler.step(epoch, val_loss)
# prefer updating the LR based on the number of steps
return self.lr_step_update()
def lr_step_update(self):
"""Update the learning rate after each update."""
new_lr = self.lr_scheduler.step_update(self.get_num_updates())
metrics.log_scalar("lr", new_lr, weight=0, priority=300)
return new_lr
def get_lr(self):
"""Get the current learning rate."""
return self.optimizer.get_lr()
def get_model(self):
"""Get the (non-wrapped) model instance."""
return self._model
def get_criterion(self):
"""Get the (non-wrapped) criterion instance."""
return self._criterion
def get_meter(self, name):
"""[deprecated] Get a specific meter by name."""
from fairseq import meters
if "get_meter" not in self._warn_once:
self._warn_once.add("get_meter")
utils.deprecation_warning(
"Trainer.get_meter is deprecated. Please use fairseq.metrics instead."
)
train_meters = metrics.get_meters("train")
if train_meters is None:
train_meters = {}
if name == "train_loss" and "loss" in train_meters:
return train_meters["loss"]
elif name == "train_nll_loss":
# support for legacy train.py, which assumed this meter is
# always initialized
m = train_meters.get("nll_loss", None)
return m or meters.AverageMeter()
elif name == "wall":
# support for legacy train.py, which assumed this meter is
# always initialized
m = metrics.get_meter("default", "wall")
return m or meters.TimeMeter()
elif name == "wps":
m = metrics.get_meter("train", "wps")
return m or meters.TimeMeter()
elif name in {"valid_loss", "valid_nll_loss"}:
# support for legacy train.py, which assumed these meters
# are always initialized
k = name[len("valid_") :]
m = metrics.get_meter("valid", k)
return m or meters.AverageMeter()
elif name == "oom":
return meters.AverageMeter()
elif name in train_meters:
return train_meters[name]
return None
def get_num_updates(self):
"""Get the number of parameters updates."""
return self._num_updates
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
self._num_updates = num_updates
self.lr_step_update()
if self.quantizer:
self.quantizer.step_update(self._num_updates)
metrics.log_scalar("num_updates", self._num_updates, weight=0, priority=200)
def clip_grad_norm(self, clip_norm):
return self.optimizer.clip_grad_norm(clip_norm, aggregate_norm_fn=None)
def cumulative_training_time(self):
if self._cumulative_training_time is None:
# single GPU
return self._local_cumulative_training_time()
else:
return self._cumulative_training_time
def _local_cumulative_training_time(self):
"""Aggregate training time in seconds."""
return time.time() - self._start_time + self._previous_training_time
def _prepare_sample(self, sample):
if sample == "DUMMY":
raise Exception(
"Trying to use an uninitialized 'dummy' batch. This usually indicates "
"that the total number of batches is smaller than the number of "
"participating GPUs. Try reducing the batch size or using fewer GPUs."
)
if sample is None or len(sample) == 0:
return None
if self.cuda:
if self.pipeline_model_parallel:
if "target" in sample:
sample["target"] = utils.move_to_cuda(
sample["target"], device=self.last_device
)
else:
sample = utils.move_to_cuda(sample)
def apply_half(t):
if t.dtype is torch.float32:
return t.half()
return t
def apply_bfloat16(t):
if t.dtype is torch.float32:
return t.to(dtype=torch.bfloat16)
return t
if self.args.fp16:
sample = utils.apply_to_sample(apply_half, sample)
if self.args.bf16:
sample = utils.apply_to_sample(apply_bfloat16, sample)
return sample
def _set_seed(self):
# Set seed based on args.seed and the update number so that we get
# reproducible results when resuming from checkpoints
seed = self.args.seed + self.get_num_updates()
utils.set_torch_seed(seed)
def _sync_stats(self):
# Return True if it's using multiple GPUs and DDP or multiple GPUs with
# BMUF and it's a bmuf sync with warmup iterations completed before.
if self.data_parallel_world_size == 1:
return False
elif self.args.use_bmuf:
return (self.get_num_updates() + 1) % self.args.global_sync_iter == 0 and (
self.get_num_updates() + 1
) > self.args.warmup_iterations
else:
return True
def _log_oom(self, exc):
msg = "OOM: Ran out of memory with exception: {}".format(exc)
logger.warning(msg)
if torch.cuda.is_available() and hasattr(torch.cuda, "memory_summary"):
for device_idx in range(torch.cuda.device_count()):
logger.warning(torch.cuda.memory_summary(device=device_idx))
sys.stderr.flush()
def _aggregate_logging_outputs(
self,
logging_outputs: List[Dict[str, Any]],
*extra_stats_to_sum,
ignore=False,
):
if self.task.__class__.logging_outputs_can_be_summed(self.get_criterion()):
return self._fast_stat_sync_sum(
logging_outputs, *extra_stats_to_sum, ignore=ignore
)
else:
return self._all_gather_list_sync(
logging_outputs, *extra_stats_to_sum, ignore=ignore
)
def _all_gather_list_sync(
self,
logging_outputs: List[Dict[str, Any]],
*extra_stats_to_sum,
ignore=False,
):
"""
Sync logging outputs across workers. all_gather_list_sync is
suitable when logging outputs are complex types.
"""
if self.tpu:
raise NotImplementedError
if ignore:
logging_outputs = []
results = list(
zip(
*distributed_utils.all_gather_list(
[logging_outputs] + list(extra_stats_to_sum),
max_size=getattr(self.args, "all_gather_list_size", 16384),
group=self.data_parallel_process_group,
)
)
)
logging_outputs, extra_stats_to_sum = results[0], results[1:]
logging_outputs = list(chain.from_iterable(logging_outputs))
extra_stats_to_sum = [sum(s) for s in extra_stats_to_sum]
return logging_outputs, extra_stats_to_sum
def _fast_stat_sync_sum(
self,
logging_outputs: List[Dict[str, Any]],
*extra_stats_to_sum,
ignore=False,
):
"""
Sync logging outputs across workers. fast_stat_sync_sum is
faster than all_gather_list_sync, but is only suitable when
logging outputs are scalars and can be summed. Note that
*logging_outputs* cannot contain any nested dicts/lists.
"""
data = {}
for i, stat in enumerate(extra_stats_to_sum):
data["extra_stats_" + str(i)] = stat
if len(logging_outputs) > 0:
log_keys = list(logging_outputs[0].keys())
for k in log_keys:
if not ignore:
v = sum(log[k] for log in logging_outputs if k in log)
else:
v = logging_outputs[0][k]
v = torch.zeros_like(v) if torch.is_tensor(v) else 0
data["logging_outputs_" + k] = v
else:
log_keys = None
data = distributed_utils.all_reduce_dict(
data, device=self.device, group=self.data_parallel_process_group
)
extra_stats_to_sum = [
data["extra_stats_" + str(i)] for i in range(len(extra_stats_to_sum))
]
if log_keys is not None:
logging_outputs = [{k: data["logging_outputs_" + k] for k in log_keys}]
else:
logging_outputs = []
return logging_outputs, extra_stats_to_sum
def _check_grad_norms(self, grad_norm):
"""Check that grad norms are consistent across workers."""
if self._grad_norm_buf is not None:
self._grad_norm_buf.zero_()
self._grad_norm_buf[self.data_parallel_rank] = grad_norm
distributed_utils.all_reduce(
self._grad_norm_buf, group=self.data_parallel_process_group
)
def is_consistent(tensor):
max_abs_diff = torch.max(torch.abs(tensor - tensor[0]))
return (
torch.isfinite(tensor).all()
or (max_abs_diff / (tensor[0] + 1e-6) < 1e-6).all()
)
if not is_consistent(self._grad_norm_buf):
pretty_detail = "\n".join(
"rank {:3d} = {:.8f}".format(r, n)
for r, n in enumerate(self._grad_norm_buf.tolist())
)
error_detail = "grad_norm across the workers:\n{}\n".format(
pretty_detail
)
# use FloatingPointError to trigger NanDetector
raise FloatingPointError(
"Fatal error: gradients are inconsistent between workers. "
"Try --ddp-backend=no_c10d. "
"Or are you mixing up different generation of GPUs in training?"
+ "\n"
+ "-" * 80
+ "\n{}\n".format(error_detail)
+ "-" * 80
)
def _reduce_and_log_stats(self, logging_outputs, sample_size, grad_norm=None):
if grad_norm is not None:
metrics.log_speed("ups", 1.0, priority=100, round=2)
metrics.log_scalar("gnorm", grad_norm, priority=400, round=3)
if self.args.clip_norm > 0:
metrics.log_scalar(
"clip",
torch.where(
grad_norm > self.args.clip_norm,
grad_norm.new_tensor(100),
grad_norm.new_tensor(0),
),
priority=500,
round=1,
)
with metrics.aggregate() as agg:
if logging_outputs is not None:
self.task.reduce_metrics(logging_outputs, self.get_criterion())
del logging_outputs
# extra warning for criterions that don't properly log a loss value
if "loss" not in agg:
if "loss" not in self._warn_once:
self._warn_once.add("loss")
logger.warning(
"Criterion.reduce_metrics did not log a 'loss' value, "
"which may break some functionality"
)
metrics.log_scalar("loss", -1)
# support legacy interface
if self.tpu:
logging_output = {}
else:
logging_output = agg.get_smoothed_values()
logging_output["sample_size"] = sample_size
for key_to_delete in ["ppl", "wps", "wpb", "bsz"]:
if key_to_delete in logging_output:
del logging_output[key_to_delete]
return logging_output
def _check_xla_compilation(self):
import torch_xla.debug.metrics as met
compile_stats = met.metric_data("CompileTime")
if compile_stats is None:
return
num_xla_compiles = compile_stats[0]
if num_xla_compiles > self._num_xla_compiles:
logger.warning(
"XLA compilation detected on device #{}; too many of these can lead "
"to slow training, but we expect a few in the beginning".format(
self.args.distributed_rank
)
)
self._num_xla_compiles = num_xla_compiles
def _catalog_shared_params(module, memo=None, prefix=""):
if memo is None:
first_call = True
memo = {}
else:
first_call = False
for name, param in module._parameters.items():
param_prefix = prefix + ("." if prefix else "") + name
if param not in memo:
memo[param] = []
memo[param].append(param_prefix)
for name, m in module._modules.items():
if m is None:
continue
submodule_prefix = prefix + ("." if prefix else "") + name
_catalog_shared_params(m, memo, submodule_prefix)
if first_call:
return [x for x in memo.values() if len(x) > 1]
def _get_module_by_path(module, path):
path = path.split(".")
for name in path:
module = getattr(module, name)
return module
def _set_module_by_path(module, path, value):
path = path.split(".")
for name in path[:-1]:
module = getattr(module, name)
setattr(module, path[-1], value)
| 47,171 | 38.082022 | 178 | py |
NeuralSpeech | NeuralSpeech-master/SoftCorrect/sc_utils/data_utils_sc.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import contextlib
import itertools
import logging
import os
import warnings
from typing import Optional, Tuple
import numpy as np
import torch
logger = logging.getLogger(__name__)
def infer_language_pair(path):
"""Infer language pair from filename: <split>.<lang1>-<lang2>.(...).idx"""
src, dst = None, None
for filename in os.listdir(path):
parts = filename.split(".")
if len(parts) >= 3 and len(parts[1].split("-")) == 2:
return parts[1].split("-")
return src, dst
def collate_tokens(
values,
pad_idx,
eos_idx=None,
left_pad=False,
move_eos_to_beginning=False,
pad_to_length=None,
pad_to_multiple=1,
):
"""Convert a list of 1d tensors into a padded 2d tensor."""
size = max(v.size(0) for v in values)
size = size if pad_to_length is None else max(size, pad_to_length)
if pad_to_multiple != 1 and size % pad_to_multiple != 0:
size = int(((size - 0.1) // pad_to_multiple + 1) * pad_to_multiple)
res = values[0].new(len(values), size).fill_(pad_idx)
def copy_tensor(src, dst):
assert dst.numel() == src.numel()
if move_eos_to_beginning:
if eos_idx is None:
# if no eos_idx is specified, then use the last token in src
dst[0] = src[-1]
else:
dst[0] = eos_idx
dst[1:] = src[:-1]
else:
dst.copy_(src)
for i, v in enumerate(values):
copy_tensor(v, res[i][size - len(v) :] if left_pad else res[i][: len(v)])
return res
def collate_2d_tokens(
values,
pad_idx,
eos_idx=None,
left_pad=False,
move_eos_to_beginning=False,
pad_to_length=None,
pad_to_multiple=1,
):
"""Convert a list of 1d tensors into a padded 2d tensor."""
hidden_size = values[0].size(1)
size = max(v.size(0) for v in values)
size = size if pad_to_length is None else max(size, pad_to_length)
if pad_to_multiple != 1 and size % pad_to_multiple != 0:
size = int(((size - 0.1) // pad_to_multiple + 1) * pad_to_multiple)
res = values[0].new(len(values), size, hidden_size).fill_(pad_idx)
def copy_tensor(src, dst):
assert dst.numel() == src.numel()
if move_eos_to_beginning:
if eos_idx is None:
# if no eos_idx is specified, then use the last token in src
dst[0] = src[-1]
else:
dst[0] = eos_idx
dst[1:] = src[:-1]
else:
dst.copy_(src)
for i, v in enumerate(values):
copy_tensor(v, res[i][size - len(v) :] if left_pad else res[i][: len(v)])
return res
def collate_2d_phones(
values,
pad_idx,
eos_idx=None,
left_pad=False,
move_eos_to_beginning=False,
pad_to_length=None,
pad_to_multiple=1,
):
"""Convert a list of 1d tensors into a padded 2d tensor."""
if move_eos_to_beginning:
raise ValueError("Need check to make sure move_eos_to_beginning performs right in this func")
hidden_size = max(i.size(1) for i in values)
size = max(v.size(0) for v in values)
size = size if pad_to_length is None else max(size, pad_to_length)
if pad_to_multiple != 1 and size % pad_to_multiple != 0:
size = int(((size - 0.1) // pad_to_multiple + 1) * pad_to_multiple)
res = values[0].new(len(values), size, hidden_size).fill_(pad_idx)
def copy_tensor(src, dst):
assert dst.numel() == src.numel()
if move_eos_to_beginning:
if eos_idx is None:
# if no eos_idx is specified, then use the last token in src
dst[0] = src[-1]
else:
dst[0] = eos_idx
dst[1:] = src[:-1]
else:
dst.copy_(src)
for i, v in enumerate(values):
copy_tensor(v, res[i][size - len(v) :, :v.size(1)] if left_pad else res[i][: len(v), :v.size(1)])
return res
def load_indexed_dataset(
path, dictionary=None, dataset_impl=None, combine=False, default="cached"
):
"""A helper function for loading indexed datasets.
Args:
path (str): path to indexed dataset (e.g., 'data-bin/train')
dictionary (~fairseq.data.Dictionary): data dictionary
dataset_impl (str, optional): which dataset implementation to use. If
not provided, it will be inferred automatically. For legacy indexed
data we use the 'cached' implementation by default.
combine (bool, optional): automatically load and combine multiple
datasets. For example, if *path* is 'data-bin/train', then we will
combine 'data-bin/train', 'data-bin/train1', ... and return a
single ConcatDataset instance.
"""
from fairseq.data.concat_dataset import ConcatDataset
import fairseq.data.indexed_dataset as indexed_dataset
datasets = []
for k in itertools.count():
path_k = path + (str(k) if k > 0 else "")
path_k = indexed_dataset.get_indexed_dataset_to_local(path_k)
dataset_impl_k = dataset_impl
if dataset_impl_k is None:
dataset_impl_k = indexed_dataset.infer_dataset_impl(path_k)
dataset = indexed_dataset.make_dataset(
path_k,
impl=dataset_impl_k or default,
fix_lua_indexing=True,
dictionary=dictionary,
)
if dataset is None:
break
logger.info("loaded {} examples from: {}".format(len(dataset), path_k))
datasets.append(dataset)
if not combine:
break
if len(datasets) == 0:
return None
elif len(datasets) == 1:
return datasets[0]
else:
return ConcatDataset(datasets)
@contextlib.contextmanager
def numpy_seed(seed, *addl_seeds):
"""Context manager which seeds the NumPy PRNG with the specified seed and
restores the state afterward"""
if seed is None:
yield
return
if len(addl_seeds) > 0:
seed = int(hash((seed, *addl_seeds)) % 1e6)
state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(state)
def collect_filtered(function, iterable, filtered):
"""
Similar to :func:`filter` but collects filtered elements in ``filtered``.
Args:
function (callable): function that returns ``False`` for elements that
should be filtered
iterable (iterable): iterable to filter
filtered (list): list to store filtered elements
"""
for el in iterable:
if function(el):
yield el
else:
filtered.append(el)
def _filter_by_size_dynamic(indices, size_fn, max_positions, raise_exception=False):
def compare_leq(a, b):
return a <= b if not isinstance(a, tuple) else max(a) <= b
def check_size(idx):
if isinstance(max_positions, float) or isinstance(max_positions, int):
return size_fn(idx) <= max_positions
elif isinstance(max_positions, dict):
idx_size = size_fn(idx)
assert isinstance(idx_size, dict)
intersect_keys = set(max_positions.keys()) & set(idx_size.keys())
return all(
all(
a is None or b is None or a <= b
for a, b in zip(idx_size[key], max_positions[key])
)
for key in intersect_keys
)
else:
# Hacky as heck, for the specific case of multilingual training with RoundRobin.
if isinstance(size_fn(idx), dict) and isinstance(max_positions, tuple):
return all(
a is None or b is None or compare_leq(a, b)
for a, b in zip(size_fn(idx).values(), max_positions)
)
# For MultiCorpusSampledDataset, will generalize it later
if not isinstance(size_fn(idx), Iterable):
return all(size_fn(idx) <= b for b in max_positions)
return all(
a is None or b is None or a <= b
for a, b in zip(size_fn(idx), max_positions)
)
ignored = []
itr = collect_filtered(check_size, indices, ignored)
indices = np.fromiter(itr, dtype=np.int64, count=-1)
return indices, ignored
def filter_by_size(indices, dataset, max_positions, raise_exception=False):
"""
[deprecated] Filter indices based on their size.
Use `FairseqDataset::filter_indices_by_size` instead.
Args:
indices (List[int]): ordered list of dataset indices
dataset (FairseqDataset): fairseq dataset instance
max_positions (tuple): filter elements larger than this size.
Comparisons are done component-wise.
raise_exception (bool, optional): if ``True``, raise an exception if
any elements are filtered (default: False).
"""
warnings.warn(
"data_utils.filter_by_size is deprecated. "
"Use `FairseqDataset::filter_indices_by_size` instead.",
stacklevel=2,
)
if isinstance(max_positions, float) or isinstance(max_positions, int):
if hasattr(dataset, "sizes") and isinstance(dataset.sizes, np.ndarray):
ignored = indices[dataset.sizes[indices] > max_positions].tolist()
indices = indices[dataset.sizes[indices] <= max_positions]
elif (
hasattr(dataset, "sizes")
and isinstance(dataset.sizes, list)
and len(dataset.sizes) == 1
):
ignored = indices[dataset.sizes[0][indices] > max_positions].tolist()
indices = indices[dataset.sizes[0][indices] <= max_positions]
else:
indices, ignored = _filter_by_size_dynamic(
indices, dataset.size, max_positions
)
else:
indices, ignored = _filter_by_size_dynamic(indices, dataset.size, max_positions)
if len(ignored) > 0 and raise_exception:
raise Exception(
(
"Size of sample #{} is invalid (={}) since max_positions={}, "
"skip this example with --skip-invalid-size-inputs-valid-test"
).format(ignored[0], dataset.size(ignored[0]), max_positions)
)
if len(ignored) > 0:
logger.warning(
(
"{} samples have invalid sizes and will be skipped, "
"max_positions={}, first few sample ids={}"
).format(len(ignored), max_positions, ignored[:10])
)
return indices
def filter_paired_dataset_indices_by_size(src_sizes, tgt_sizes, indices, max_sizes):
"""Filter a list of sample indices. Remove those that are longer
than specified in max_sizes.
Args:
indices (np.array): original array of sample indices
max_sizes (int or list[int] or tuple[int]): max sample size,
can be defined separately for src and tgt (then list or tuple)
Returns:
np.array: filtered sample array
list: list of removed indices
"""
if max_sizes is None:
return indices, []
if type(max_sizes) in (int, float):
max_src_size, max_tgt_size = max_sizes, max_sizes
else:
max_src_size, max_tgt_size = max_sizes
if tgt_sizes is None:
ignored = indices[src_sizes[indices] > max_src_size]
else:
ignored = indices[
(src_sizes[indices] > max_src_size) | (tgt_sizes[indices] > max_tgt_size)
]
if len(ignored) > 0:
if tgt_sizes is None:
indices = indices[src_sizes[indices] <= max_src_size]
else:
indices = indices[
(src_sizes[indices] <= max_src_size)
& (tgt_sizes[indices] <= max_tgt_size)
]
return indices, ignored.tolist()
def batch_by_size(
indices,
num_tokens_fn,
max_tokens=None,
max_sentences=None,
required_batch_size_multiple=1,
fixed_shapes=None,
):
"""
Yield mini-batches of indices bucketed by size. Batches may contain
sequences of different lengths.
Args:
indices (List[int]): ordered list of dataset indices
num_tokens_fn (callable): function that returns the number of tokens at
a given index
max_tokens (int, optional): max number of tokens in each batch
(default: None).
max_sentences (int, optional): max number of sentences in each
batch (default: None).
required_batch_size_multiple (int, optional): require batch size to
be less than N or a multiple of N (default: 1).
fixed_shapes (List[Tuple[int, int]], optional): if given, batches will
only be created with the given shapes. *max_sentences* and
*required_batch_size_multiple* will be ignored (default: None).
"""
try:
from fairseq.data.data_utils_fast import (
batch_by_size_fast,
batch_fixed_shapes_fast,
)
except ImportError:
raise ImportError(
"Please build Cython components with: `pip install --editable .` "
"or `python setup.py build_ext --inplace`"
)
max_tokens = max_tokens if max_tokens is not None else -1
max_sentences = max_sentences if max_sentences is not None else -1
bsz_mult = required_batch_size_multiple
if not isinstance(indices, np.ndarray):
indices = np.fromiter(indices, dtype=np.int64, count=-1)
if fixed_shapes is None:
return batch_by_size_fast(
indices,
num_tokens_fn,
max_tokens,
max_sentences,
bsz_mult,
)
else:
fixed_shapes = np.array(fixed_shapes, dtype=np.int64)
sort_order = np.lexsort(
[
fixed_shapes[:, 1].argsort(), # length
fixed_shapes[:, 0].argsort(), # bsz
]
)
fixed_shapes_sorted = fixed_shapes[sort_order]
return batch_fixed_shapes_fast(indices, num_tokens_fn, fixed_shapes_sorted)
def post_process(sentence: str, symbol: str):
if symbol == "sentencepiece":
sentence = sentence.replace(" ", "").replace("\u2581", " ").strip()
elif symbol == "wordpiece":
sentence = sentence.replace(" ", "").replace("_", " ").strip()
elif symbol == "letter":
sentence = sentence.replace(" ", "").replace("|", " ").strip()
elif symbol == "_EOW":
sentence = sentence.replace(" ", "").replace("_EOW", " ").strip()
elif symbol is not None and symbol != "none":
sentence = (sentence + " ").replace(symbol, "").rstrip()
return sentence
def compute_mask_indices(
shape: Tuple[int, int],
padding_mask: Optional[torch.Tensor],
mask_prob: float,
mask_length: int,
mask_type: str = "static",
mask_other: float = 0.0,
min_masks: int = 0,
no_overlap: bool = False,
min_space: int = 0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape
Args:
shape: the the shape for which to compute masks.
should be of size 2 where first element is batch size and 2nd is timesteps
padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by
number of timesteps divided by length of mask span to mask approximately this percentage of all elements.
however due to overlaps, the actual number will be smaller (unless no_overlap is True)
mask_type: how to compute mask lengths
static = fixed size
uniform = sample from uniform distribution [mask_other, mask_length*2]
normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element
poisson = sample from possion distribution with lambda = mask length
min_masks: minimum number of masked spans
no_overlap: if false, will switch to an alternative recursive algorithm that prevents spans from overlapping
min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans
"""
bsz, all_sz = shape
mask = np.full((bsz, all_sz), False)
all_num_mask = int(
# add a random number for probabilistic rounding
mask_prob * all_sz / float(mask_length)
+ np.random.rand()
)
all_num_mask = max(min_masks, all_num_mask)
mask_idcs = []
for i in range(bsz):
if padding_mask is not None:
sz = all_sz - padding_mask[i].long().sum().item()
num_mask = int(
# add a random number for probabilistic rounding
mask_prob * sz / float(mask_length)
+ np.random.rand()
)
num_mask = max(min_masks, num_mask)
else:
sz = all_sz
num_mask = all_num_mask
if mask_type == "static":
lengths = np.full(num_mask, mask_length)
elif mask_type == "uniform":
lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask)
elif mask_type == "normal":
lengths = np.random.normal(mask_length, mask_other, size=num_mask)
lengths = [max(1, int(round(x))) for x in lengths]
elif mask_type == "poisson":
lengths = np.random.poisson(mask_length, size=num_mask)
lengths = [int(round(x)) for x in lengths]
else:
raise Exception("unknown mask selection " + mask_type)
if sum(lengths) == 0:
lengths[0] = min(mask_length, sz - 1)
if no_overlap:
mask_idc = []
def arrange(s, e, length, keep_length):
span_start = np.random.randint(s, e - length)
mask_idc.extend(span_start + i for i in range(length))
new_parts = []
if span_start - s - min_space >= keep_length:
new_parts.append((s, span_start - min_space + 1))
if e - span_start - keep_length - min_space > keep_length:
new_parts.append((span_start + length + min_space, e))
return new_parts
parts = [(0, sz)]
min_length = min(lengths)
for length in sorted(lengths, reverse=True):
lens = np.fromiter(
(e - s if e - s >= length + min_space else 0 for s, e in parts),
np.int,
)
l_sum = np.sum(lens)
if l_sum == 0:
break
probs = lens / np.sum(lens)
c = np.random.choice(len(parts), p=probs)
s, e = parts.pop(c)
parts.extend(arrange(s, e, length, min_length))
mask_idc = np.asarray(mask_idc)
else:
min_len = min(lengths)
if sz - min_len <= num_mask:
min_len = sz - num_mask - 1
mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)
mask_idc = np.asarray(
[
mask_idc[j] + offset
for j in range(len(mask_idc))
for offset in range(lengths[j])
]
)
mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))
min_len = min([len(m) for m in mask_idcs])
for i, mask_idc in enumerate(mask_idcs):
if len(mask_idc) > min_len:
mask_idc = np.random.choice(mask_idc, min_len, replace=False)
mask[i, mask_idc] = True
return mask
def get_mem_usage():
try:
import psutil
mb = 1024 * 1024
return f"used={psutil.virtual_memory().used / mb}Mb; avail={psutil.virtual_memory().available / mb}Mb"
except ImportError:
return "N/A"
def lengths_to_padding_mask(lens: torch.LongTensor) -> torch.BoolTensor:
bsz, max_lens = lens.size(0), torch.max(lens).item()
mask = torch.arange(max_lens).to(lens.device).view(1, max_lens)
mask = mask.expand(bsz, -1) >= lens.view(bsz, 1).expand(-1, max_lens)
return mask
def lengths_to_mask(lens: torch.LongTensor) -> torch.BoolTensor:
return ~lengths_to_padding_mask(lens)
| 20,635 | 35.140105 | 120 | py |
NeuralSpeech | NeuralSpeech-master/SoftCorrect/sc_utils/dictionary_sc.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from collections import Counter
from multiprocessing import Pool
import torch
from fairseq import utils
from fairseq.binarizer import safe_readline
from fairseq.data import data_utils
from fairseq.file_io import PathManager
from fairseq.tokenizer import tokenize_line
class Dictionary_sc(object):
"""A mapping from symbols to consecutive integers"""
def __init__(
self,
*, # begin keyword-only arguments
bos="<s>",
pad="<pad>",
eos="</s>",
unk="<unk>",
extra_special_symbols=None,
pad_first=False,
):
self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
self.symbols = []
self.count = []
self.indices = {}
if pad_first:
self.pad_index = self.add_symbol(pad)
self.bos_index = self.add_symbol(bos)
else:
self.bos_index = self.add_symbol(bos)
self.pad_index = self.add_symbol(pad)
self.eos_index = self.add_symbol(eos)
self.unk_index = self.add_symbol(unk)
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(s)
self.nspecial = len(self.symbols)
def __eq__(self, other):
return self.indices == other.indices
def __getitem__(self, idx):
if idx < len(self.symbols):
return self.symbols[idx]
return self.unk_word
def __len__(self):
"""Returns the number of symbols in the dictionary"""
return len(self.symbols)
def __contains__(self, sym):
return sym in self.indices
def index(self, sym):
"""Returns the index of the specified symbol"""
assert isinstance(sym, str)
if sym in self.indices:
return self.indices[sym]
return self.unk_index
def string(
self,
tensor,
bpe_symbol=None,
escape_unk=False,
extra_symbols_to_ignore=None,
unk_string=None,
):
"""Helper for converting a tensor of token indices to a string.
Can optionally remove BPE symbols or escape <unk> words.
"""
if torch.is_tensor(tensor) and tensor.dim() == 2:
return "\n".join(
self.string(t, bpe_symbol, escape_unk, extra_symbols_to_ignore)
for t in tensor
)
extra_symbols_to_ignore = set(extra_symbols_to_ignore or [])
extra_symbols_to_ignore.add(self.eos())
def token_string(i):
if i == self.unk():
if unk_string is not None:
return unk_string
else:
return self.unk_string(escape_unk)
else:
return self[i]
if hasattr(self, "bos_index"):
extra_symbols_to_ignore.add(self.bos())
sent = " ".join(
token_string(i)
for i in tensor
if utils.item(i) not in extra_symbols_to_ignore
)
return data_utils.post_process(sent, bpe_symbol)
def unk_string(self, escape=False):
"""Return unknown string, optionally escaped as: <<unk>>"""
if escape:
return "<{}>".format(self.unk_word)
else:
return self.unk_word
def add_symbol(self, word, n=1, overwrite=False):
"""Adds a word to the dictionary"""
if word in self.indices and not overwrite:
idx = self.indices[word]
self.count[idx] = self.count[idx] + n
return idx
else:
idx = len(self.symbols)
self.indices[word] = idx
self.symbols.append(word)
self.count.append(n)
return idx
def update(self, new_dict):
"""Updates counts from new dictionary."""
for word in new_dict.symbols:
idx2 = new_dict.indices[word]
if word in self.indices:
idx = self.indices[word]
self.count[idx] = self.count[idx] + new_dict.count[idx2]
else:
idx = len(self.symbols)
self.indices[word] = idx
self.symbols.append(word)
self.count.append(new_dict.count[idx2])
def finalize(self, threshold=-1, nwords=-1, padding_factor=8):
"""Sort symbols by frequency in descending order, ignoring special ones.
Args:
- threshold defines the minimum word count
- nwords defines the total number of words in the final dictionary,
including special symbols
- padding_factor can be used to pad the dictionary size to be a
multiple of 8, which is important on some hardware (e.g., Nvidia
Tensor Cores).
"""
if nwords <= 0:
nwords = len(self)
new_indices = dict(zip(self.symbols[: self.nspecial], range(self.nspecial)))
new_symbols = self.symbols[: self.nspecial]
new_count = self.count[: self.nspecial]
c = Counter(
dict(
sorted(zip(self.symbols[self.nspecial :], self.count[self.nspecial :]))
)
)
for symbol, count in c.most_common(nwords - self.nspecial):
if count >= threshold:
new_indices[symbol] = len(new_symbols)
new_symbols.append(symbol)
new_count.append(count)
else:
break
assert len(new_symbols) == len(new_indices)
self.count = list(new_count)
self.symbols = list(new_symbols)
self.indices = new_indices
self.pad_to_multiple_(padding_factor)
def pad_to_multiple_(self, padding_factor):
"""Pad Dictionary size to be a multiple of *padding_factor*."""
if padding_factor > 1:
i = 0
while len(self) % padding_factor != 0:
symbol = "madeupword{:04d}".format(i)
self.add_symbol(symbol, n=0)
i += 1
def bos(self):
"""Helper to get index of beginning-of-sentence symbol"""
return self.bos_index
def pad(self):
"""Helper to get index of pad symbol"""
return self.pad_index
def eos(self):
"""Helper to get index of end-of-sentence symbol"""
return self.eos_index
def unk(self):
"""Helper to get index of unk symbol"""
return self.unk_index
def void(self):
# set void token
return self.indices['<void>']
def mask(self):
# set void token
return self.indices['<mask>']
def gttoken(self):
# set void token
try:
return self.indices['<gttoken>']
except:
print('<gttoken> not found in dictionary!')
return None
@classmethod
def load(cls, f, pad_first=False):
"""Loads the dictionary from a text file with the format:
```
<symbol0> <count0>
<symbol1> <count1>
...
```
"""
d = cls(pad_first=pad_first)
d.add_from_file(f)
return d
def add_from_file(self, f):
"""
Loads a pre-existing dictionary from a text file and adds its symbols
to this instance.
"""
if isinstance(f, str):
try:
with open(PathManager.get_local_path(f), "r", encoding="utf-8") as fd:
self.add_from_file(fd)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(
"Incorrect encoding detected in {}, please "
"rebuild the dataset".format(f)
)
return
lines = f.readlines()
indices_start_line = self._load_meta(lines)
for line in lines[indices_start_line:]:
try:
line, field = line.rstrip().rsplit(" ", 1)
if field == "#fairseq:overwrite":
overwrite = True
line, field = line.rsplit(" ", 1)
else:
overwrite = False
count = int(field)
word = line
if word in self and not overwrite:
raise RuntimeError(
"Duplicate word found when loading Dictionary: '{}'. "
"Duplicate words can overwrite earlier ones by adding the "
"#fairseq:overwrite flag at the end of the corresponding row "
"in the dictionary file. If using the Camembert model, please "
"download an updated copy of the model file.".format(word)
)
self.add_symbol(word, n=count, overwrite=overwrite)
except ValueError:
raise ValueError(
"Incorrect dictionary format, expected '<token> <cnt> [flags]'"
)
def _save(self, f, kv_iterator):
if isinstance(f, str):
PathManager.mkdirs(os.path.dirname(f))
with PathManager.open(f, "w", encoding="utf-8") as fd:
return self.save(fd)
for k, v in kv_iterator:
print("{} {}".format(k, v), file=f)
def _get_meta(self):
return [], []
def _load_meta(self, lines):
return 0
def save(self, f):
"""Stores dictionary into a text file"""
ex_keys, ex_vals = self._get_meta()
self._save(
f,
zip(
ex_keys + self.symbols[self.nspecial :],
ex_vals + self.count[self.nspecial :],
),
)
def dummy_sentence(self, length):
t = torch.Tensor(length).uniform_(self.nspecial + 1, len(self)).long()
t[-1] = self.eos()
return t
def encode_line(
self,
line,
line_tokenizer=tokenize_line,
add_if_not_exist=True,
consumer=None,
append_eos=True,
reverse_order=False,
):
words = line_tokenizer(line)
if reverse_order:
words = list(reversed(words))
nwords = len(words)
ids = torch.IntTensor(nwords + 1 if append_eos else nwords)
for i, word in enumerate(words):
if add_if_not_exist:
idx = self.add_symbol(word)
else:
idx = self.index(word)
if consumer is not None:
consumer(word, idx)
ids[i] = idx
if append_eos:
ids[nwords] = self.eos_index
return ids
@staticmethod
def _add_file_to_dictionary_single_worker(
filename, tokenize, eos_word, worker_id=0, num_workers=1
):
counter = Counter()
with open(PathManager.get_local_path(filename), "r", encoding="utf-8") as f:
size = os.fstat(f.fileno()).st_size
chunk_size = size // num_workers
offset = worker_id * chunk_size
end = offset + chunk_size
f.seek(offset)
if offset > 0:
safe_readline(f) # drop first incomplete line
line = f.readline()
while line:
for word in tokenize(line):
counter.update([word])
counter.update([eos_word])
if f.tell() > end:
break
line = f.readline()
return counter
@staticmethod
def add_file_to_dictionary(filename, dict, tokenize, num_workers):
def merge_result(counter):
for w, c in sorted(counter.items()):
dict.add_symbol(w, c)
if num_workers > 1:
pool = Pool(processes=num_workers)
results = []
for worker_id in range(num_workers):
results.append(
pool.apply_async(
Dictionary_sc._add_file_to_dictionary_single_worker,
(filename, tokenize, dict.eos_word, worker_id, num_workers),
)
)
pool.close()
pool.join()
for r in results:
merge_result(r.get())
else:
merge_result(
Dictionary_sc._add_file_to_dictionary_single_worker(
filename, tokenize, dict.eos_word
)
)
| 12,677 | 31.259542 | 87 | py |
NeuralSpeech | NeuralSpeech-master/SoftCorrect/sc_utils/options_fc.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from typing import Callable, List, Optional
import torch
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass.data_class import (
CheckpointParams,
CommonEvalParams,
CommonParams,
DatasetParams,
DistributedTrainingParams,
EvalLMParams,
OptimizationParams,
)
from fairseq.dataclass.utils import gen_parser_from_dataclass
# this import is for backward compatibility
from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list # noqa
def get_preprocessing_parser(default_task="translation"):
parser = get_parser("Preprocessing", default_task)
add_preprocess_args(parser)
return parser
def get_training_parser(default_task="translation"):
parser = get_parser("Trainer", default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser)
add_model_args(parser)
add_optimization_args(parser)
add_checkpoint_args(parser)
return parser
def get_generation_parser(interactive=False, default_task="translation"):
parser = get_parser("Generation", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_generation_args(parser)
if interactive:
add_interactive_args(parser)
return parser
def get_interactive_generation_parser(default_task="translation"):
return get_generation_parser(interactive=True, default_task=default_task)
def get_eval_lm_parser(default_task="language_modeling"):
parser = get_parser("Evaluate Language Model", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_eval_lm_args(parser)
return parser
def get_validation_parser(default_task=None):
parser = get_parser("Validation", default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser, default_world_size=1)
group = parser.add_argument_group("Evaluation")
gen_parser_from_dataclass(group, CommonEvalParams())
return parser
def parse_args_and_arch(
parser: argparse.ArgumentParser,
input_args: List[str] = None,
parse_known: bool = False,
suppress_defaults: bool = False,
modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None,
):
"""
Args:
parser (ArgumentParser): the parser
input_args (List[str]): strings to parse, defaults to sys.argv
parse_known (bool): only parse known arguments, similar to
`ArgumentParser.parse_known_args`
suppress_defaults (bool): parse while ignoring all default values
modify_parser (Optional[Callable[[ArgumentParser], None]]):
function to modify the parser, e.g., to set default values
"""
if suppress_defaults:
# Parse args without any default values. This requires us to parse
# twice, once to identify all the necessary task/model args, and a second
# time with all defaults set to None.
args = parse_args_and_arch(
parser,
input_args=input_args,
parse_known=parse_known,
suppress_defaults=False,
)
suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])
suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})
args = suppressed_parser.parse_args(input_args)
return argparse.Namespace(
**{k: v for k, v in vars(args).items() if v is not None}
)
from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY, MODEL_REGISTRY
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args(input_args)
utils.import_user_module(usr_args)
if modify_parser is not None:
modify_parser(parser)
# The parser doesn't know about model/criterion/optimizer-specific args, so
# we parse twice. First we parse the model/criterion/optimizer, then we
# parse a second time after adding the *-specific arguments.
# If input_args is given, we will parse those args instead of sys.argv.
args, _ = parser.parse_known_args(input_args)
# Add model-specific args to parser.
if hasattr(args, "arch"):
model_specific_group = parser.add_argument_group(
"Model-specific configuration",
# Only include attributes which are explicitly given as command-line
# arguments or which have default values.
argument_default=argparse.SUPPRESS,
)
if args.arch in ARCH_MODEL_REGISTRY:
ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
elif args.arch in MODEL_REGISTRY:
MODEL_REGISTRY[args.arch].add_args(model_specific_group)
else:
raise RuntimeError()
# Add *-specific args to parser.
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
choice = getattr(args, registry_name, None)
if choice is not None:
cls = REGISTRY["registry"][choice]
if hasattr(cls, "add_args"):
cls.add_args(parser)
if hasattr(args, "task"):
from fairseq.tasks import TASK_REGISTRY
TASK_REGISTRY[args.task].add_args(parser)
if getattr(args, "use_bmuf", False):
# hack to support extra args for block distributed data parallelism
from fairseq.optim.bmuf import FairseqBMUF
FairseqBMUF.add_args(parser)
# Modify the parser a second time, since defaults may have been reset
if modify_parser is not None:
modify_parser(parser)
# Parse a second time.
if parse_known:
args, extra = parser.parse_known_args(input_args)
else:
args = parser.parse_args(input_args)
extra = None
# Post-process args.
if (
hasattr(args, "batch_size_valid") and args.batch_size_valid is None
) or not hasattr(args, "batch_size_valid"):
args.batch_size_valid = args.batch_size
if hasattr(args, "max_tokens_valid") and args.max_tokens_valid is None:
args.max_tokens_valid = args.max_tokens
if getattr(args, "memory_efficient_fp16", False):
args.fp16 = True
if getattr(args, "memory_efficient_bf16", False):
args.bf16 = True
args.tpu = getattr(args, "tpu", False)
args.bf16 = getattr(args, "bf16", False)
if args.bf16:
args.tpu = True
if args.tpu and args.fp16:
raise ValueError("Cannot combine --fp16 and --tpu, use --bf16 on TPUs")
if getattr(args, "seed", None) is None:
args.seed = 1 # default seed for training
args.no_seed_provided = True
else:
args.no_seed_provided = False
# Apply architecture configuration.
if hasattr(args, "arch") and args.arch in ARCH_CONFIG_REGISTRY:
ARCH_CONFIG_REGISTRY[args.arch](args)
if parse_known:
return args, extra
else:
return args
def get_parser(desc, default_task="translation"):
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args()
utils.import_user_module(usr_args)
parser = argparse.ArgumentParser(allow_abbrev=False)
gen_parser_from_dataclass(parser, CommonParams())
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
parser.add_argument(
"--" + registry_name.replace("_", "-"),
default=REGISTRY["default"],
choices=REGISTRY["registry"].keys(),
)
# Task definitions can be found under fairseq/tasks/
from fairseq.tasks import TASK_REGISTRY
parser.add_argument(
"--task",
metavar="TASK",
default=default_task,
choices=TASK_REGISTRY.keys(),
help="task",
)
# fmt: on
return parser
def add_preprocess_args(parser):
group = parser.add_argument_group("Preprocessing")
# fmt: off
group.add_argument("-s", "--source-lang", default=None, metavar="SRC",
help="source language")
group.add_argument("-t", "--target-lang", default=None, metavar="TARGET",
help="target language")
group.add_argument("--trainpref", metavar="FP", default=None,
help="train file prefix")
group.add_argument("--validpref", metavar="FP", default=None,
help="comma separated, valid file prefixes")
group.add_argument("--testpref", metavar="FP", default=None,
help="comma separated, test file prefixes")
group.add_argument("--align-suffix", metavar="FP", default=None,
help="alignment file suffix")
group.add_argument("--destdir", metavar="DIR", default="data-bin",
help="destination dir")
group.add_argument("--thresholdtgt", metavar="N", default=0, type=int,
help="map words appearing less than threshold times to unknown")
group.add_argument("--thresholdsrc", metavar="N", default=0, type=int,
help="map words appearing less than threshold times to unknown")
group.add_argument("--tgtdict", metavar="FP",
help="reuse given target dictionary")
group.add_argument("--srcdict", metavar="FP",
help="reuse given source dictionary")
group.add_argument("--nwordstgt", metavar="N", default=-1, type=int,
help="number of target words to retain")
group.add_argument("--nwordssrc", metavar="N", default=-1, type=int,
help="number of source words to retain")
group.add_argument("--alignfile", metavar="ALIGN", default=None,
help="an alignment file (optional)")
parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap',
choices=get_available_dataset_impl(),
help='output dataset implementation')
group.add_argument("--joined-dictionary", action="store_true",
help="Generate joined dictionary")
group.add_argument("--only-source", action="store_true",
help="Only process the source language")
group.add_argument("--padding-factor", metavar="N", default=8, type=int,
help="Pad dictionary size to be multiple of N")
group.add_argument("--workers", metavar="N", default=1, type=int,
help="number of parallel workers")
group.add_argument("--src-with-werdur", action="store_true", default=False,
help="whether the src file contains werdur-info")
# fmt: on
return parser
def add_dataset_args(parser, train=False, gen=False):
group = parser.add_argument_group("dataset_data_loading")
gen_parser_from_dataclass(group, DatasetParams())
# fmt: on
return group
def add_distributed_training_args(parser, default_world_size=None):
group = parser.add_argument_group("distributed_training")
if default_world_size is None:
default_world_size = max(1, torch.cuda.device_count())
gen_parser_from_dataclass(
group, DistributedTrainingParams(distributed_world_size=default_world_size)
)
return group
def add_optimization_args(parser):
group = parser.add_argument_group("optimization")
# fmt: off
gen_parser_from_dataclass(group, OptimizationParams())
# fmt: on
return group
def add_checkpoint_args(parser):
group = parser.add_argument_group("checkpoint")
# fmt: off
gen_parser_from_dataclass(group, CheckpointParams())
# fmt: on
return group
def add_common_eval_args(group):
gen_parser_from_dataclass(group, CommonEvalParams())
def add_eval_lm_args(parser):
group = parser.add_argument_group("LM Evaluation")
add_common_eval_args(group)
gen_parser_from_dataclass(group, EvalLMParams())
def add_generation_args(parser):
group = parser.add_argument_group("Generation")
add_common_eval_args(group)
# fmt: off
group.add_argument('--beam', default=5, type=int, metavar='N',
help='beam size')
group.add_argument('--nbest', default=1, type=int, metavar='N',
help='number of hypotheses to output')
group.add_argument('--max-len-a', default=0, type=float, metavar='N',
help=('generate sequences of maximum length ax + b, '
'where x is the source length'))
group.add_argument('--max-len-b', default=200, type=int, metavar='N',
help=('generate sequences of maximum length ax + b, '
'where x is the source length'))
group.add_argument('--min-len', default=1, type=float, metavar='N',
help=('minimum generation length'))
group.add_argument('--match-source-len', default=False, action='store_true',
help=('generations should match the source length'))
group.add_argument('--no-early-stop', action='store_true',
help='deprecated')
group.add_argument('--unnormalized', action='store_true',
help='compare unnormalized hypothesis scores')
group.add_argument('--no-beamable-mm', action='store_true',
help='don\'t use BeamableMM in attention layers')
group.add_argument('--lenpen', default=1, type=float,
help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences')
group.add_argument('--unkpen', default=0, type=float,
help='unknown word penalty: <0 produces more unks, >0 produces fewer')
group.add_argument('--replace-unk', nargs='?', const=True, default=None,
help='perform unknown replacement (optionally with alignment dictionary)')
group.add_argument('--sacrebleu', action='store_true',
help='score with sacrebleu')
group.add_argument('--score-reference', action='store_true',
help='just score the reference translation')
group.add_argument('--prefix-size', default=0, type=int, metavar='PS',
help='initialize generation by target prefix of given length')
group.add_argument('--no-repeat-ngram-size', default=0, type=int, metavar='N',
help='ngram blocking such that this size ngram cannot be repeated in the generation')
group.add_argument('--sampling', action='store_true',
help='sample hypotheses instead of using beam search')
group.add_argument('--sampling-topk', default=-1, type=int, metavar='PS',
help='sample from top K likely next words instead of all words')
group.add_argument('--sampling-topp', default=-1.0, type=float, metavar='PS',
help='sample from the smallest set whose cumulative probability mass exceeds p for next words')
group.add_argument('--constraints', const="ordered", nargs="?", choices=["ordered", "unordered"],
help='enables lexically constrained decoding')
group.add_argument('--temperature', default=1., type=float, metavar='N',
help='temperature for generation')
group.add_argument('--diverse-beam-groups', default=-1, type=int, metavar='N',
help='number of groups for Diverse Beam Search')
group.add_argument('--diverse-beam-strength', default=0.5, type=float, metavar='N',
help='strength of diversity penalty for Diverse Beam Search')
group.add_argument('--diversity-rate', default=-1.0, type=float, metavar='N',
help='strength of diversity penalty for Diverse Siblings Search')
group.add_argument('--print-alignment', action='store_true',
help='if set, uses attention feedback to compute and print alignment to source tokens')
group.add_argument('--print-step', action='store_true')
group.add_argument('--lm-path', default=None, type=str, metavar='PATH',
help='path to lm checkpoint for lm fusion')
group.add_argument('--lm-weight', default=0.0, type=float, metavar='N',
help='weight for lm probs for lm fusion')
# arguments for iterative refinement generator
group.add_argument('--iter-decode-eos-penalty', default=0.0, type=float, metavar='N',
help='if > 0.0, it penalized early-stopping in decoding.')
group.add_argument('--iter-decode-max-iter', default=10, type=int, metavar='N',
help='maximum iterations for iterative refinement.')
group.add_argument('--iter-decode-force-max-iter', action='store_true',
help='if set, run exact the maximum number of iterations without early stop')
group.add_argument('--iter-decode-with-beam', default=1, type=int, metavar='N',
help='if > 1, model will generate translations varying by the lengths.')
group.add_argument('--iter-decode-with-external-reranker', action='store_true',
help='if set, the last checkpoint are assumed to be a reranker to rescore the translations'),
group.add_argument('--retain-iter-history', action='store_true',
help='if set, decoding returns the whole history of iterative refinement')
group.add_argument('--retain-dropout', action='store_true',
help='Use dropout at inference time')
group.add_argument('--retain-dropout-modules', default=None, nargs='+', type=str,
help='if set, only retain dropout for the specified modules; '
'if not set, then dropout will be retained for all modules')
# special decoding format for advanced decoding.
group.add_argument('--decoding-format', default=None, type=str, choices=['unigram', 'ensemble', 'vote', 'dp', 'bs'])
# fmt: on
return group
def add_interactive_args(parser):
group = parser.add_argument_group("Interactive")
# fmt: off
group.add_argument('--buffer-size', default=0, type=int, metavar='N',
help='read this many sentences into a buffer before processing them')
group.add_argument('--input', default='-', type=str, metavar='FILE',
help='file to read from; use - for stdin')
# fmt: on
def add_model_args(parser):
group = parser.add_argument_group("Model configuration")
# fmt: off
# Model definitions can be found under fairseq/models/
#
# The model architecture can be specified in several ways.
# In increasing order of priority:
# 1) model defaults (lowest priority)
# 2) --arch argument
# 3) --encoder/decoder-* arguments (highest priority)
from fairseq.models import ARCH_MODEL_REGISTRY
group.add_argument('--arch', '-a', metavar='ARCH',
choices=ARCH_MODEL_REGISTRY.keys(),
help='model architecture')
# fmt: on
return group
| 19,822 | 43.346756 | 120 | py |
NeuralSpeech | NeuralSpeech-master/SoftCorrect/sc_utils/corrector_ds.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
import torch
from fairseq.data import FairseqDataset
import random
import math
import copy
import json
import data_utils_sc
logger = logging.getLogger(__name__)
def collate(
samples,
pad_idx,
eos_idx,
left_pad_source=True,
left_pad_target=False,
input_feeding=True,
pad_to_length=None,
pad_to_multiple=1,
):
if len(samples) == 0:
return {}
def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None, merge_phone=False):
if merge_phone:
return data_utils_sc.collate_2d_phones(
[s[key] for s in samples],
pad_idx,
eos_idx,
left_pad,
move_eos_to_beginning,
pad_to_length=pad_to_length,
pad_to_multiple=pad_to_multiple,
)
elif len(samples[0][key].shape) == 1:
return data_utils_sc.collate_tokens(
[s[key] for s in samples],
pad_idx,
eos_idx,
left_pad,
move_eos_to_beginning,
pad_to_length=pad_to_length,
pad_to_multiple=pad_to_multiple,
)
elif len(samples[0][key].shape) == 2:
return data_utils_sc.collate_2d_tokens(
[s[key] for s in samples],
pad_idx,
eos_idx,
left_pad,
move_eos_to_beginning,
pad_to_length=pad_to_length,
pad_to_multiple=pad_to_multiple,
)
else:
raise ValueError("Unsupported condition!")
def check_alignment(alignment, src_len, tgt_len):
if alignment is None or len(alignment) == 0:
return False
if (
alignment[:, 0].max().item() >= src_len - 1
or alignment[:, 1].max().item() >= tgt_len - 1
):
logger.warning("alignment size mismatch found, skipping alignment!")
return False
return True
def compute_alignment_weights(alignments):
"""
Given a tensor of shape [:, 2] containing the source-target indices
corresponding to the alignments, a weight vector containing the
inverse frequency of each target index is computed.
For e.g. if alignments = [[5, 7], [2, 3], [1, 3], [4, 2]], then
a tensor containing [1., 0.5, 0.5, 1] should be returned (since target
index 3 is repeated twice)
"""
align_tgt = alignments[:, 1]
_, align_tgt_i, align_tgt_c = torch.unique(
align_tgt, return_inverse=True, return_counts=True
)
align_weights = align_tgt_c[align_tgt_i[np.arange(len(align_tgt))]]
return 1.0 / align_weights.float()
id = torch.LongTensor([s["id"] for s in samples])
src_tokens = merge(
"source",
left_pad=left_pad_source,
pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
)
# sort by descending source length
if len(samples[0]["source"].shape) == 1:
src_lengths = torch.LongTensor(
[s["source"].ne(pad_idx).long().sum() for s in samples]
)
elif len(samples[0]["source"].shape) == 2:
src_lengths = torch.LongTensor(
[s["source"][:, 0].ne(pad_idx).long().sum() for s in samples]
)
else:
raise ValueError("Unsupported condition!")
src_lengths, sort_order = src_lengths.sort(descending=True)
id = id.index_select(0, sort_order)
src_tokens = src_tokens.index_select(0, sort_order)
prev_output_tokens = None
target = None
if samples[0].get("wer_dur", None) is not None:
wer_dur = merge(
"wer_dur",
left_pad=left_pad_source,
pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
)
wer_dur = wer_dur.index_select(0, sort_order)
to_be_edited = merge(
"to_be_edited",
left_pad=left_pad_source,
pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
)
to_be_edited = to_be_edited.index_select(0, sort_order)
if samples[0].get("closest_label", None) is not None:
closest_label = merge(
"closest_label",
left_pad=left_pad_source,
pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
)
closest_label = closest_label.index_select(0, sort_order)
else:
closest_label = None
else:
closest_label = None
wer_dur = None
to_be_edited = None
if samples[0].get("source_phone", None) is not None:
source_phone = merge(
"source_phone",
left_pad=left_pad_source,
pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
merge_phone=True
)
source_phone = source_phone.index_select(0, sort_order)
else:
source_phone = None
if samples[0].get("mask_type", None) is not None:
mask_type = merge(
"mask_type",
left_pad=left_pad_source,
pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
)
mask_type = mask_type.index_select(0, sort_order)
else:
mask_type = None
if samples[0].get("target_full", None) is not None:
target_full = merge(
"target_full",
left_pad=left_pad_source,
pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
)
target_full = target_full.index_select(0, sort_order)
else:
target_full = None
if samples[0].get("target_ctc", None) is not None:
target_ctc = merge(
"target_ctc",
left_pad=left_pad_source,
pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
)
target_ctc = target_ctc.index_select(0, sort_order)
else:
target_ctc = None
if samples[0].get("target", None) is not None:
target = merge(
"target",
left_pad=left_pad_target,
pad_to_length=pad_to_length["target"]
if pad_to_length is not None
else None,
)
target = target.index_select(0, sort_order)
if samples[0].get("for_wer_gather", None) is not None:
for_wer_gather = merge(
"for_wer_gather",
left_pad=left_pad_target,
pad_to_length=pad_to_length["target"]
if pad_to_length is not None
else None,
)
for_wer_gather = for_wer_gather.index_select(0, sort_order)
else:
for_wer_gather = None
tgt_lengths = torch.LongTensor(
[s["target"].ne(pad_idx).long().sum() for s in samples]
).index_select(0, sort_order)
ntokens = tgt_lengths.sum().item()
if samples[0].get("prev_output_tokens", None) is not None:
prev_output_tokens = merge("prev_output_tokens", left_pad=left_pad_target)
elif input_feeding:
# we create a shifted version of targets for feeding the
# previous output token(s) into the next decoder step
prev_output_tokens = merge(
"target",
left_pad=left_pad_target,
move_eos_to_beginning=True,
pad_to_length=pad_to_length["target"]
if pad_to_length is not None
else None,
)
else:
ntokens = src_lengths.sum().item()
for_wer_gather = None
if samples[0].get("wer_dur", None) is not None:
#print("source_phone in colltor:", samples[0]["source_phone"])
batch = {
"id": id,
"nsentences": len(samples),
"ntokens": ntokens,
"net_input": {
"src_tokens": src_tokens,
"src_lengths": src_lengths,
"wer_dur": wer_dur,
"to_be_edited": to_be_edited,
"for_wer_gather": for_wer_gather,
"closest_label": closest_label,
"source_phone": source_phone,
"mask_type": mask_type,
"target_full": target_full,
"target_ctc": target_ctc,
},
"target": target,
}
else:
batch = {
"id": id,
"nsentences": len(samples),
"ntokens": ntokens,
"net_input": {
"src_tokens": src_tokens,
"src_lengths": src_lengths,
"source_phone": source_phone,
"mask_type": mask_type,
"target_full": target_full,
"target_ctc": target_ctc,
},
"target": target,
}
if prev_output_tokens is not None:
batch["net_input"]["prev_output_tokens"] = prev_output_tokens.index_select(
0, sort_order
)
if samples[0].get("alignment", None) is not None:
bsz, tgt_sz = batch["target"].shape
src_sz = batch["net_input"]["src_tokens"].shape[1]
offsets = torch.zeros((len(sort_order), 2), dtype=torch.long)
offsets[:, 1] += torch.arange(len(sort_order), dtype=torch.long) * tgt_sz
if left_pad_source:
offsets[:, 0] += src_sz - src_lengths
if left_pad_target:
offsets[:, 1] += tgt_sz - tgt_lengths
alignments = [
alignment + offset
for align_idx, offset, src_len, tgt_len in zip(
sort_order, offsets, src_lengths, tgt_lengths
)
for alignment in [samples[align_idx]["alignment"].view(-1, 2)]
if check_alignment(alignment, src_len, tgt_len)
]
if len(alignments) > 0:
alignments = torch.cat(alignments, dim=0)
align_weights = compute_alignment_weights(alignments)
batch["alignments"] = alignments
batch["align_weights"] = align_weights
if samples[0].get("constraints", None) is not None:
# Collate the packed constraints across the samples, padding to
# the length of the longest sample.
lens = [sample.get("constraints").size(0) for sample in samples]
max_len = max(lens)
constraints = torch.zeros((len(samples), max(lens))).long()
for i, sample in enumerate(samples):
constraints[i, 0 : lens[i]] = samples[i].get("constraints")
batch["constraints"] = constraints
return batch
class LanguagePairDataset(FairseqDataset):
"""
A pair of torch.utils.data.Datasets.
Args:
src (torch.utils.data.Dataset): source dataset to wrap
src_sizes (List[int]): source sentence lengths
src_dict (~fairseq.data.Dictionary): source vocabulary
tgt (torch.utils.data.Dataset, optional): target dataset to wrap
tgt_sizes (List[int], optional): target sentence lengths
tgt_dict (~fairseq.data.Dictionary, optional): target vocabulary
left_pad_source (bool, optional): pad source tensors on the left side
(default: True).
left_pad_target (bool, optional): pad target tensors on the left side
(default: False).
shuffle (bool, optional): shuffle dataset elements before batching
(default: True).
input_feeding (bool, optional): create a shifted version of the targets
to be passed into the model for teacher forcing (default: True).
remove_eos_from_source (bool, optional): if set, removes eos from end
of source if it's present (default: False).
append_eos_to_target (bool, optional): if set, appends eos to end of
target if it's absent (default: False).
align_dataset (torch.utils.data.Dataset, optional): dataset
containing alignments.
constraints (Tensor, optional): 2d tensor with a concatenated, zero-
delimited list of constraints for each sentence.
append_bos (bool, optional): if set, appends bos to the beginning of
source/target sentence.
num_buckets (int, optional): if set to a value greater than 0, then
batches will be bucketed into the given number of batch shapes.
src_lang_id (int, optional): source language ID, if set, the collated batch
will contain a field 'src_lang_id' in 'net_input' which indicates the
source language of the samples.
tgt_lang_id (int, optional): target language ID, if set, the collated batch
will contain a field 'tgt_lang_id' which indicates the target language
of the samples.
"""
def __init__(
self,
src,
src_sizes,
src_dict,
tgt=None,
tgt_sizes=None,
tgt_dict=None,
left_pad_source=True,
left_pad_target=False,
shuffle=True,
input_feeding=True,
remove_eos_from_source=False,
append_eos_to_target=False,
align_dataset=None,
constraints=None,
append_bos=False,
eos=None,
num_buckets=0,
src_lang_id=None,
tgt_lang_id=None,
pad_to_multiple=1,
cal_wer_dur=False,
src_with_werdur=False,
bos_prepended_outside=False,
nbest_infer=0,
homophone_dict_path="",
mask_ratio=0.0,
detector_mask_ratio=0.0,
error_distribution=None,
ft_error_distribution=None,
duptoken_error_distribution=None,
force_mask_type=None,
):
if tgt_dict is not None:
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
if tgt is not None:
assert len(src) == len(
tgt
), "Source and target must contain the same number of examples"
self.src = src
self.tgt = tgt
self.src_sizes = np.array(src_sizes)
self.tgt_sizes = np.array(tgt_sizes) if tgt_sizes is not None else None
self.sizes = (
np.vstack((self.src_sizes, self.tgt_sizes)).T
if self.tgt_sizes is not None
else self.src_sizes
)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
self.left_pad_source = left_pad_source
self.left_pad_target = left_pad_target
self.shuffle = shuffle
self.input_feeding = input_feeding
self.remove_eos_from_source = remove_eos_from_source
self.append_eos_to_target = append_eos_to_target
self.align_dataset = align_dataset
if self.align_dataset is not None:
assert (
self.tgt_sizes is not None
), "Both source and target needed when alignments are provided"
self.constraints = constraints
self.append_bos = append_bos
self.eos = eos if eos is not None else src_dict.eos()
self.src_lang_id = src_lang_id
self.tgt_lang_id = tgt_lang_id
if num_buckets > 0:
from fairseq.data import BucketPadLengthDataset
self.src = BucketPadLengthDataset(
self.src,
sizes=self.src_sizes,
num_buckets=num_buckets,
pad_idx=self.src_dict.pad(),
left_pad=self.left_pad_source,
)
self.src_sizes = self.src.sizes
logger.info("bucketing source lengths: {}".format(list(self.src.buckets)))
if self.tgt is not None:
self.tgt = BucketPadLengthDataset(
self.tgt,
sizes=self.tgt_sizes,
num_buckets=num_buckets,
pad_idx=self.tgt_dict.pad(),
left_pad=self.left_pad_target,
)
self.tgt_sizes = self.tgt.sizes
logger.info(
"bucketing target lengths: {}".format(list(self.tgt.buckets))
)
# determine bucket sizes using self.num_tokens, which will return
# the padded lengths (thanks to BucketPadLengthDataset)
num_tokens = np.vectorize(self.num_tokens, otypes=[np.long])
self.bucketed_num_tokens = num_tokens(np.arange(len(self.src)))
self.buckets = [
(None, num_tokens) for num_tokens in np.unique(self.bucketed_num_tokens)
]
else:
self.buckets = None
self.pad_to_multiple = pad_to_multiple
self.cal_wer_dur = cal_wer_dur
self.src_with_werdur = src_with_werdur
self.nbest_infer = nbest_infer
self.bos_prepended_outside = bos_prepended_outside
if self.cal_wer_dur:
assert not self.src_with_werdur
if self.src_with_werdur:
assert not self.cal_wer_dur
self.phone_size = None
self.vocab_phone_dict = None
if homophone_dict_path:
#print("homophone_dict_path:", homophone_dict_path)
self.homophone_dict = {}
self.top_char_set = []
with open(homophone_dict_path, 'r', encoding='utf-8') as infile:
for num, line in enumerate(infile.readlines()):
line = line.strip().split('\t')
if num < 3000:
self.top_char_set.append(self.src_dict.index(line[0]))
assert len(line) == 2
assert self.src_dict.index(line[0]) != self.src_dict.unk()
self.homophone_dict[self.src_dict.index(line[0])] = [self.src_dict.index(i) for i in line[1].split()]
for i in self.homophone_dict[self.src_dict.index(line[0])]:
assert i != self.src_dict.unk()
else:
self.homophone_dict = None
self.mask_ratio = mask_ratio
self.detector_mask_ratio = detector_mask_ratio
self.error_distribution = error_distribution
if self.detector_mask_ratio != 0.0:
assert self.error_distribution[3] == 0.0
if error_distribution:
assert sum(error_distribution) == 1.0
assert ft_error_distribution is None
self.duptoken_error_distribution = duptoken_error_distribution
if self.duptoken_error_distribution is not None:
assert len(self.duptoken_error_distribution) == 2
assert sum(self.duptoken_error_distribution) == 1.0
if ft_error_distribution:
assert error_distribution is None
# assert duptoken_error_distribution is None
self.ft_error_distribution = ft_error_distribution
self.correct2dup = ft_error_distribution[0]
self.wrong2dup = ft_error_distribution[1]
print("Convert {} correct word to dup token and {} wrong word to CTC dup token".format(self.correct2dup,
self.wrong2dup))
else:
self.correct2dup = None
self.wrong2dup = None
self.ft_error_distribution = None
self.force_mask_type = force_mask_type
#print(self.force_mask_type)
def get_batch_shapes(self):
return self.buckets
def random_mask(self, i, max_length):
result = [i]
prob = random.random()
if prob < 0.25:
if i - 1 >= 1:
result.append(i-1)
elif prob > 0.75:
if i - 1 >= 1:
result.append(i-1)
if i - 1 >= 2:
result.append(i-2)
prob = random.random()
if prob < 0.25:
if i+1 < max_length - 1:
result.append(i+1)
elif prob > 0.75:
if i+1 < max_length - 1:
result.append(i+1)
if i+1 < max_length - 2:
result.append(i+2)
return result
def apply_mask(self, src_item_list, target_full_list, target_ctc_list, mask_type_list, token, mask_choose_id, dup_type=0, next_token=None, is_pretrain=True):
token = int(token)
if next_token is not None:
assert dup_type == 2
assert mask_choose_id == 4
next_token = int(next_token)
if mask_choose_id == 1:
assert is_pretrain
# same token
src_item_list.append(token)
target_full_list.append(token)
mask_type_list.append(1)
if len(src_item_list) == 1 or mask_type_list[-2] == 4 or target_full_list[-1] != target_full_list[-2]:
target_ctc_list.append(token)
elif mask_choose_id == 2:
# <mask>
assert is_pretrain
src_item_list.append(self.src_dict.mask())
target_full_list.append(token)
mask_type_list.append(2)
if len(src_item_list) == 1 or mask_type_list[-2] == 4 or target_full_list[-1] != target_full_list[-2]:
target_ctc_list.append(token)
elif mask_choose_id == 3:
# homophone
candidate_logit = [6, 5, 5, 4, 4, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1]
if token not in self.homophone_dict.keys():
assert self.apply_mask_toall
src_item_list.append(token)
target_full_list.append(token)
mask_type_list.append(1)
if len(src_item_list) == 1 or mask_type_list[-2] == 4 or target_full_list[-1] != target_full_list[-2]:
target_ctc_list.append(token)
# raise ValueError("Impossile condition!")
# homophone = np.random.choice(homophone_dictionary["freq_token"])
else:
candidate = self.homophone_dict[token]
prob_candidate = [i / sum(candidate_logit[:len(candidate)]) for i in candidate_logit[:len(candidate)]]
homophone = np.random.choice(candidate, p=prob_candidate)
src_item_list.append(homophone)
target_full_list.append(token)
mask_type_list.append(3)
if len(src_item_list) == 1 or mask_type_list[-2] == 4 or target_full_list[-1] != target_full_list[-2]:
target_ctc_list.append(token)
elif mask_choose_id == 4:
if dup_type != 0:
if random.random() < 0.5:
candidate_logit = [6, 5, 5, 4, 4, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1]
if dup_type == 1 or random.random() < 0.5:
candidate = self.homophone_dict[token]
else:
candidate = self.homophone_dict[next_token]
prob_candidate = [i / sum(candidate_logit[:len(candidate)]) for i in
candidate_logit[:len(candidate)]]
homophone = np.random.choice(candidate, p=prob_candidate)
else:
if dup_type == 1 or random.random() < 0.5:
homophone = token
else:
homophone = next_token
for _ in range(3):
src_item_list.append(homophone)
target_full_list.append(token)
mask_type_list.append(4)
target_ctc_list.append(token)
if dup_type == 2:
target_full_list[-1] = next_token
assert next_token is not None
target_ctc_list.append(next_token)
elif mask_choose_id == 5:
# <mask> to random token
# assert is_pretrain
src_item_list.append(int(np.random.choice(self.top_char_set)))
target_full_list.append(token)
mask_type_list.append(5)
if len(src_item_list) == 1 or mask_type_list[-2] == 4 or target_full_list[-1] != target_full_list[-2]:
target_ctc_list.append(token)
elif mask_choose_id == 6:
# <mask>
assert is_pretrain
src_item_list.append(self.src_dict.mask())
target_full_list.append(self.src_dict.void())
mask_type_list.append(6)
if len(src_item_list) == 1 or mask_type_list[-2] == 4 or target_full_list[-1] != target_full_list[-2]:
target_ctc_list.append(self.src_dict.void())
else:
raise ValueError("impossible mask_choose_id")
return src_item_list, target_full_list, target_ctc_list, mask_type_list
def build_example_for_mask(self, src_item, index, werdur_info=None, tgt_item=None, error_dis=None):
if error_dis is None:
error_dis = self.error_distribution
target = src_item
mask_type_list = []
src_item_list = []
target_full_list = []
target_ctc_list = []
if self.force_mask_type is not None:
if self.force_mask_type == "detector_infer" or self.force_mask_type == "dec_infer":
self.force_mask_type = [0 for _ in range(len(src_item))]
assert len(src_item) == len(self.force_mask_type)
for pos in range(len(src_item)):
if self.force_mask_type[pos] in [0, 3]:
src_item_list.append(src_item[pos])
target_full_list.append(src_item[pos])
mask_type_list.append(self.force_mask_type[pos])
target_ctc_list.append(src_item[pos])
elif self.force_mask_type[pos] == 4:
for _ in range(3):
src_item_list.append(src_item[pos])
target_full_list.append(src_item[pos])
mask_type_list.append(4)
target_ctc_list.append(src_item[pos])
else:
raise ValueError("Not support mask type")
else:
# for pos in range(len(src_item)):
pos = 0
while pos < len(src_item):
if pos == 0 or pos == len(src_item) - 1:
src_item_list.append(src_item[pos])
target_full_list.append(src_item[pos])
mask_type_list.append(0)
target_ctc_list.append(src_item[pos])
pos += 1
continue
if int(src_item[pos]) not in self.homophone_dict.keys():
src_item_list.append(src_item[pos])
target_full_list.append(src_item[pos])
mask_type_list.append(0)
if pos == 0 or mask_type_list[-2] == 4 or target_full_list[-1] != target_full_list[-2]:
target_ctc_list.append(src_item[pos])
pos += 1
else:
add_mask_var = random.random()
if add_mask_var <= self.mask_ratio:
addition_pos = 0
dup_type = 0 #0: not dup, 1 dup to itself, 2 dup to homo
mask_choose_var = random.random()
prob_thre = error_dis[0]
mask_choose_id = 0
while mask_choose_var > prob_thre:
mask_choose_id = mask_choose_id + 1
prob_thre = prob_thre + error_dis[mask_choose_id]
if mask_choose_id + 1 == 4:
if self.duptoken_error_distribution is not None:
if pos == len(src_item) - 1 or int(src_item[pos + 1]) not in self.homophone_dict.keys():
addition_pos = 0
dup_type = 1
elif pos == len(src_item) - 2 and (src_item[pos - 1] == src_item[pos] == src_item[pos + 1]):
addition_pos = 0
dup_type = 1
elif (pos < len(src_item) - 2) and (int(src_item[pos - 1] == src_item[pos]) + int(src_item[pos] == src_item[pos+1]) + int(src_item[pos+1] == src_item[pos+2])> 1):
addition_pos = 0
dup_type = 1
elif random.random() < self.duptoken_error_distribution[0]:
addition_pos = 0
dup_type = 1
else:
addition_pos = 1
dup_type = 2
if dup_type != 2:
src_item_list, target_full_list, target_ctc_list, mask_type_list = self.apply_mask(
src_item_list, target_full_list, target_ctc_list, mask_type_list, src_item[pos], mask_choose_id + 1,
dup_type=dup_type)
else:
src_item_list, target_full_list, target_ctc_list, mask_type_list = self.apply_mask(src_item_list,
target_full_list, target_ctc_list, mask_type_list, src_item[pos], mask_choose_id + 1, dup_type=dup_type, next_token=src_item[pos+1])
if mask_choose_id != 5:
pos = pos + addition_pos + 1
else:
src_item_list.append(src_item[pos])
target_full_list.append(src_item[pos])
mask_type_list.append(0)
if pos == 0 or mask_type_list[-2] == 4 or target_full_list[-1] != target_full_list[-2]:
target_ctc_list.append(src_item[pos])
pos += 1
mask_type = torch.LongTensor(mask_type_list)
src_item = torch.LongTensor(src_item_list)
target_full = torch.LongTensor(target_full_list)
target_ctc = torch.LongTensor(target_ctc_list)
example = {
"id": index,
"source": src_item,
"target": target,
"target_full": target_full,
"wer_dur": None,
"to_be_edited": None,
"for_wer_gather": None,
"source_phone": None,
"mask_type": mask_type,
"target_ctc": target_ctc
}
return example
def build_example_for_detector(self, src_item, index, werdur_info=None, tgt_item=None, is_infer=True):
if self.force_mask_type is not None:
assert self.force_mask_type == "detector_infer" or self.force_mask_type == "dec_infer"
example = self.build_example_for_mask(src_item, index, werdur_info, tgt_item, error_dis=[1.0, 0.0, 0.0, 0.0, 0.0])
assert (example["mask_type"] > 1).long().sum() == 0
return example
else:
detector_mask_var = random.random()
if detector_mask_var > self.detector_mask_ratio:
return self.build_example_for_mask(src_item, index, werdur_info, tgt_item)
else:
return self.build_example_for_mask(src_item, index, werdur_info, tgt_item, error_dis=[0.1, 0.0, 0.6, 0.0, 0.3])
def build_example_for_finetune(self, src_item, index, tgt_item, to_be_edited, werdur):
mask_type_list = []
decoder_input_list = []
target_full_list = []
target_ctc_list = []
# encoder_label_list = []
pos = 0
tgt_pos = 0
while pos < len(src_item):
if pos == 0 or pos == len(src_item) - 1:
assert werdur[pos] == 1
assert to_be_edited[pos] == 1
assert src_item[pos] == tgt_item[tgt_pos]
decoder_input_list.append(src_item[pos])
target_full_list.append(tgt_item[tgt_pos])
mask_type_list.append(0)
target_ctc_list.append(tgt_item[tgt_pos])
# encoder_label_list.append([int(tgt_item[tgt_pos]), self.src_dict.pad(), self.src_dict.pad()])
pos += 1
tgt_pos += 1
continue
else:
add_mask_var = random.random()
if int(to_be_edited[pos]) == 1: #right token
assert int(werdur[pos]) == 1
assert src_item[pos] == tgt_item[tgt_pos], (pos, tgt_pos, src_item[pos], tgt_item[tgt_pos], src_item, tgt_item, werdur, to_be_edited)
if add_mask_var < self.correct2dup:
target_ctc_list.append(tgt_item[tgt_pos])
for _ in range(3):
decoder_input_list.append(src_item[pos])
target_full_list.append(tgt_item[tgt_pos])
mask_type_list.append(4)
else:
decoder_input_list.append(src_item[pos])
target_full_list.append(tgt_item[tgt_pos])
mask_type_list.append(0)
if tgt_pos == 0 or mask_type_list[-2] == 4 or target_full_list[-1] != target_full_list[-2]:
target_ctc_list.append(tgt_item[tgt_pos])
# encoder_label_list.append([int(tgt_item[tgt_pos]), self.src_dict.pad(), self.src_dict.pad()])
pos += 1
tgt_pos += 1
elif int(werdur[pos]) == 0:
# encoder_label_list.append([self.src_dict.void(), self.src_dict.pad(), self.src_dict.pad()])
pos += 1
elif int(werdur[pos]) == 1:
if src_item[pos] == tgt_item[tgt_pos]:
assert int(tgt_item[tgt_pos]) == 3, (src_item[pos], tgt_item[tgt_pos], pos, tgt_pos, src_item, tgt_item, werdur, to_be_edited)
#assert src_item[pos] != tgt_item[tgt_pos], (src_item[pos], tgt_item[tgt_pos], pos, tgt_pos, src_item, tgt_item, werdur, to_be_edited)
if add_mask_var < self.wrong2dup:
target_ctc_list.append(tgt_item[tgt_pos])
for j in range(3):
decoder_input_list.append(src_item[pos])
target_full_list.append(tgt_item[tgt_pos])
mask_type_list.append(4)
else:
decoder_input_list.append(src_item[pos])
target_full_list.append(tgt_item[tgt_pos])
mask_type_list.append(3)
if tgt_pos == 0 or mask_type_list[-2] == 4 or target_full_list[-1] != target_full_list[-2]:
target_ctc_list.append(tgt_item[tgt_pos])
# encoder_label_list.append([int(tgt_item[tgt_pos]), self.src_dict.pad(), self.src_dict.pad()])
pos += 1
tgt_pos += 1
else:
assert 3 >= int(werdur[pos]) > 1, (werdur[pos], src_item, tgt_item, werdur, to_be_edited)
for j in range(int(werdur[pos])):
target_ctc_list.append(tgt_item[tgt_pos + j])
for j in range(3):
decoder_input_list.append(src_item[pos])
target_full_list.append(tgt_item[tgt_pos])
mask_type_list.append(4)
# if int(werdur[pos]) == -2:
# encoder_label_list.append([int(tgt_item[tgt_pos]), int(tgt_item[tgt_pos + 1]), self.src_dict.pad()])
# else:
# encoder_label_list.append(
# [int(tgt_item[tgt_pos]), int(tgt_item[tgt_pos + 1]), int(tgt_item[tgt_pos + 2])])
tgt_pos += abs(werdur[pos])
pos += 1
assert pos == len(src_item)
assert tgt_pos == len(tgt_item)
mask_type = torch.LongTensor(mask_type_list)
decoder_input = torch.LongTensor(decoder_input_list)
target_full = torch.LongTensor(target_full_list)
target_ctc = torch.LongTensor(target_ctc_list)
# encoder_label = torch.LongTensor(encoder_label_list)
example = {
"id": index,
"source": decoder_input,
"target": tgt_item,
"target_full": target_full,
"wer_dur": None,
"to_be_edited": None,
"for_wer_gather": None,
"source_phone": None,
"mask_type": mask_type,
"target_ctc": target_ctc,
}
return example
def __getitem__(self, index):
#if self.mask_ratio != 0.0:
# assert self.tgt is None
tgt_item = self.tgt[index] if self.tgt is not None else None
src_item = self.src[index]
# Append EOS to end of tgt sentence if it does not have an EOS and remove
# EOS from end of src sentence if it exists. This is useful when we use
# use existing datasets for opposite directions i.e., when we want to
# use tgt_dataset as src_dataset and vice versa
if self.append_eos_to_target:
eos = self.tgt_dict.eos() if self.tgt_dict else self.src_dict.eos()
if self.tgt and self.tgt[index][-1] != eos:
tgt_item = torch.cat([self.tgt[index], torch.LongTensor([eos])])
if self.append_bos:
bos = self.tgt_dict.bos() if self.tgt_dict else self.src_dict.bos()
if self.tgt and self.tgt[index][0] != bos:
tgt_item = torch.cat([torch.LongTensor([bos]), self.tgt[index]])
bos = self.src_dict.bos()
if self.src[index][0] != bos:
src_item = torch.cat([torch.LongTensor([bos]), self.src[index]])
if self.remove_eos_from_source:
eos = self.src_dict.eos()
if self.src[index][-1] == eos:
src_item = self.src[index][:-1]
if (self.mask_ratio != 0.0 or self.force_mask_type or self.detector_mask_ratio != 0.0):
if self.src_with_werdur:
# assert not
src_item_length = int(len(src_item))
if self.append_bos or self.bos_prepended_outside: # origin 8, append_bos: 9
assert src_item_length % 2 == 1
werdur_info = src_item[(src_item_length + 1) // 2:].clone() - 32768
werdur_info = torch.cat([torch.LongTensor([1]), werdur_info], dim=-1)
src_item = src_item[:(src_item_length + 1) // 2]
else:
assert src_item_length % 2 == 0
werdur_info = src_item[(src_item_length) // 2:].clone() - 32768
# werdur_info = torch.cat([torch.LongTensor([1]), werdur_info], dim=-1)
src_item = src_item[:(src_item_length) // 2]
else:
werdur_info = None
if self.nbest_infer > 1:
src_item_length = int(len(src_item))
bos = self.tgt_dict.bos() if self.tgt_dict else self.src_dict.bos()
eos = self.tgt_dict.eos() if self.tgt_dict else self.src_dict.eos()
if self.append_bos or self.bos_prepended_outside: # origin 8, append_bos: 9
src_item = src_item[1:-1] # remove EOS
assert len(src_item) % self.nbest_infer == 0
src_item = torch.reshape(src_item,
[self.nbest_infer, int(len(src_item) / self.nbest_infer)]).transpose(0, 1)
src_item = torch.cat([torch.LongTensor([[bos for iter_i in range(self.nbest_infer)]]), src_item,
torch.LongTensor([[eos for iter_i in range(self.nbest_infer)]])], dim=0)
else:
src_item = src_item[:-1] # remove EOS
assert len(src_item) % self.nbest_infer == 0
src_item = torch.reshape(src_item,
[int(len(src_item) / self.nbest_infer), self.nbest_infer]).transpose(0, 1)
src_item = torch.cat(
[src_item, torch.LongTensor([[eos for iter_i in range(self.nbest_infer)]])], dim=0)
example = {
"id": index,
"source": src_item,
"target": None,
"target_full": None,
"wer_dur": None,
"to_be_edited": None,
"for_wer_gather": None,
"source_phone": None,
"mask_type": torch.zeros(src_item.shape[0]),
"target_ctc": None
}
return example
elif self.force_mask_type == "detector_infer" or self.force_mask_type == "dec_infer":
return self.build_example_for_detector(src_item, index, werdur_info, tgt_item, is_infer=True)
elif self.detector_mask_ratio != 0.0:
return self.build_example_for_detector(src_item, index, werdur_info, tgt_item)
else:
return self.build_example_for_mask(src_item, index, werdur_info, tgt_item)
if self.src_with_werdur:
# assert not
src_item_length = int(len(src_item))
#print(src_item_length, src_item)
if self.append_bos or self.bos_prepended_outside: # origin 8, append_bos: 9
assert src_item_length % 2 == 1
werdur_info = src_item[(src_item_length+1)//2:].clone() - 32768
werdur_info = torch.cat([torch.LongTensor([1]), werdur_info], dim=-1)
src_item = src_item[:(src_item_length+1)//2]
else:
assert src_item_length % 2 == 0
werdur_info = src_item[(src_item_length)//2:].clone() - 32768
# werdur_info = torch.cat([torch.LongTensor([1]), werdur_info], dim=-1)
src_item = src_item[:(src_item_length)//2]
to_be_edited = werdur_info.clamp(0, 1)
wer_dur = torch.abs(werdur_info)
assert self.ft_error_distribution is not None
return self.build_example_for_finetune(src_item, index, tgt_item, to_be_edited, wer_dur)
else:
example = {
"id": index,
"source": src_item,
"target": tgt_item,
}
if self.align_dataset is not None:
example["alignment"] = self.align_dataset[index]
if self.constraints is not None:
example["constraints"] = self.constraints[index]
return example
def __len__(self):
return len(self.src)
def collater(self, samples, pad_to_length=None):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
pad_to_length (dict, optional): a dictionary of
{'source': source_pad_to_length, 'target': target_pad_to_length}
to indicate the max length to pad to in source and target respectively.
Returns:
dict: a mini-batch with the following keys:
- `id` (LongTensor): example IDs in the original input order
- `ntokens` (int): total number of tokens in the batch
- `net_input` (dict): the input to the Model, containing keys:
- `src_tokens` (LongTensor): a padded 2D Tensor of tokens in
the source sentence of shape `(bsz, src_len)`. Padding will
appear on the left if *left_pad_source* is ``True``.
- `src_lengths` (LongTensor): 1D Tensor of the unpadded
lengths of each source sentence of shape `(bsz)`
- `prev_output_tokens` (LongTensor): a padded 2D Tensor of
tokens in the target sentence, shifted right by one
position for teacher forcing, of shape `(bsz, tgt_len)`.
This key will not be present if *input_feeding* is
``False``. Padding will appear on the left if
*left_pad_target* is ``True``.
- `src_lang_id` (LongTensor): a long Tensor which contains source
language IDs of each sample in the batch
- `target` (LongTensor): a padded 2D Tensor of tokens in the
target sentence of shape `(bsz, tgt_len)`. Padding will appear
on the left if *left_pad_target* is ``True``.
- `tgt_lang_id` (LongTensor): a long Tensor which contains target language
IDs of each sample in the batch
"""
res = collate(
samples,
pad_idx=self.src_dict.pad(),
eos_idx=self.eos,
left_pad_source=self.left_pad_source,
left_pad_target=self.left_pad_target,
input_feeding=self.input_feeding,
pad_to_length=pad_to_length,
pad_to_multiple=self.pad_to_multiple,
)
if self.src_lang_id is not None or self.tgt_lang_id is not None:
src_tokens = res["net_input"]["src_tokens"]
bsz = src_tokens.size(0)
if self.src_lang_id is not None:
res["net_input"]["src_lang_id"] = (
torch.LongTensor([[self.src_lang_id]]).expand(bsz, 1).to(src_tokens)
)
if self.tgt_lang_id is not None:
res["tgt_lang_id"] = (
torch.LongTensor([[self.tgt_lang_id]]).expand(bsz, 1).to(src_tokens)
)
return res
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
if self.src_with_werdur:
return max(
self.src_sizes[index] // 2,
self.tgt_sizes[index] if self.tgt_sizes is not None else 0,
)
else:
return max(
self.src_sizes[index],
self.tgt_sizes[index] if self.tgt_sizes is not None else 0,
)
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return (
self.src_sizes[index],
self.tgt_sizes[index] if self.tgt_sizes is not None else 0,
)
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
indices = np.random.permutation(len(self)).astype(np.int64)
else:
indices = np.arange(len(self), dtype=np.int64)
if self.buckets is None:
# sort by target length, then source length
if self.tgt_sizes is not None:
indices = indices[np.argsort(self.tgt_sizes[indices], kind="mergesort")]
return indices[np.argsort(self.src_sizes[indices], kind="mergesort")]
else:
# sort by bucketed_num_tokens, which is:
# max(padded_src_len, padded_tgt_len)
return indices[
np.argsort(self.bucketed_num_tokens[indices], kind="mergesort")
]
@property
def supports_prefetch(self):
return getattr(self.src, "supports_prefetch", False) and (
getattr(self.tgt, "supports_prefetch", False) or self.tgt is None
)
def prefetch(self, indices):
self.src.prefetch(indices)
if self.tgt is not None:
self.tgt.prefetch(indices)
if self.align_dataset is not None:
self.align_dataset.prefetch(indices)
def filter_indices_by_size(self, indices, max_sizes):
"""Filter a list of sample indices. Remove those that are longer
than specified in max_sizes.
Args:
indices (np.array): original array of sample indices
max_sizes (int or list[int] or tuple[int]): max sample size,
can be defined separately for src and tgt (then list or tuple)
Returns:
np.array: filtered sample array
list: list of removed indices
"""
return data_utils_sc.filter_paired_dataset_indices_by_size(
self.src_sizes,
self.tgt_sizes,
indices,
max_sizes,
)
| 48,975 | 43.082808 | 194 | py |
NeuralSpeech | NeuralSpeech-master/SoftCorrect/softcorrect/softcorrect_task.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import itertools
import logging
logger = logging.getLogger(__name__)
import torch
from fairseq import utils
from fairseq.tasks import register_task
from fairseq.tasks.translation import TranslationTask
from fairseq.utils import new_arange
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
data_utils,
encoders,
indexed_dataset,
)
from fairseq import tokenizer
from dictionary_sc import Dictionary_sc
from corrector_ds import LanguagePairDataset
def load_langpair_dataset(
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
left_pad_source,
left_pad_target,
max_source_positions,
max_target_positions,
prepend_bos=False,
load_alignments=False,
truncate_source=False,
append_source_id=False,
num_buckets=0,
shuffle=True,
pad_to_multiple=1,
cal_wer_dur=False,
src_with_werdur=False,
append_eos_to_target=False,
nbest_infer=0,
homophone_dict_path="",
mask_ratio=0.0,
detector_mask_ratio=0.0,
error_distribution=None,
ft_error_distribution=None,
duptoken_error_distribution=None,
):
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
# infer langcode
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
src_dataset = data_utils.load_indexed_dataset(
prefix + src, src_dict, dataset_impl
)
if truncate_source:
src_dataset = AppendTokenDataset(
TruncateDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 1,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_dataset = data_utils.load_indexed_dataset(
prefix + tgt, tgt_dict, dataset_impl
)
if tgt_dataset is not None:
tgt_datasets.append(tgt_dataset)
logger.info(
"{} {} {}-{} {} examples".format(
data_path, split_k, src, tgt, len(src_datasets[-1])
)
)
if not combine:
break
assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
if len(src_datasets) == 1:
src_dataset = src_datasets[0]
tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
if len(tgt_datasets) > 0:
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
else:
tgt_dataset = None
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
if tgt_dataset is not None:
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
eos = None
if append_source_id:
src_dataset = AppendTokenDataset(
src_dataset, src_dict.index("[{}]".format(src))
)
if tgt_dataset is not None:
tgt_dataset = AppendTokenDataset(
tgt_dataset, tgt_dict.index("[{}]".format(tgt))
)
eos = tgt_dict.index("[{}]".format(tgt))
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(
align_path, None, dataset_impl
)
tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
return LanguagePairDataset(
src_dataset,
src_dataset.sizes,
src_dict,
tgt_dataset,
tgt_dataset_sizes,
tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
align_dataset=align_dataset,
eos=eos,
num_buckets=num_buckets,
shuffle=shuffle,
pad_to_multiple=pad_to_multiple,
cal_wer_dur=cal_wer_dur,
src_with_werdur=src_with_werdur,
append_eos_to_target=append_eos_to_target,
bos_prepended_outside=prepend_bos,
nbest_infer=nbest_infer,
homophone_dict_path=homophone_dict_path,
mask_ratio=mask_ratio,
detector_mask_ratio=detector_mask_ratio,
error_distribution=error_distribution,
ft_error_distribution=ft_error_distribution,
duptoken_error_distribution=duptoken_error_distribution,
)
@register_task("softcorrect_task")
class SoftcorrectTask(TranslationTask):
"""
Translation (Sequence Generation) task for Levenshtein Transformer
See `"Levenshtein Transformer" <https://arxiv.org/abs/1905.11006>`_.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
TranslationTask.add_args(parser)
parser.add_argument(
'--noise',
default='random_delete',
choices=['random_delete', 'random_mask', 'no_noise', 'full_mask'])
parser.add_argument(
'--cal-wer-dur', action="store_true", default=False,
help='Whether to cal-wer-dur in dataset')
parser.add_argument(
'--use-wer-dur', action="store_true", default=False,
help='Whether to use wer dur in model')
parser.add_argument(
'--src-with-werdur', action="store_true", default=False,
help='Whether the werdur is in dataset')
parser.add_argument(
'--break-alignment', action="store_true", default=False,
help="break the alignment, using single padding"
)
parser.add_argument("--pos-before-reshape", action="store_true", default=False,
help="whether apply pos embedding before reshape")
parser.add_argument(
"--homophone-dict-path",
type=str,
default="",
help="path to the homophone dict",
)
parser.add_argument(
"--mask-ratio",
type=float,
default=0.0,
help="mask rato",
)
parser.add_argument(
"--detector-mask-ratio",
type=float,
default=0.0,
help="mask ratio of detector",
)
parser.add_argument(
"--error-distribution",
type=str,
default=None,
help="simulated error distribution",
)
parser.add_argument(
"--ft-error-distribution",
type=str,
default=None,
help="error distribution in finetune",
)
parser.add_argument(
"--duptoken-error-distribution",
type=str,
default=None,
help="error distribution of duped token",
)
parser.add_argument(
'--untouch-token-loss', type=float,
default=0.0,
help='lambda of untouch token'
)
parser.add_argument('--pad-first-dictionary', action='store_true',
help='in dictionary, whether pad is index 0')
parser.add_argument(
'--candidate-size', type=int,
default=0,
help='number of candidate number'
)
parser.add_argument(
'--label-leak-prob', type=float,
default=0.0,
help='prob of label leak'
)
parser.add_argument(
'--label-ignore-prob', type=float,
default=0.0,
help='prob of ignore label (predict nota) when wrong'
)
parser.add_argument(
'--bert-generator-encoder-model-path', type=str,
default="",
help='path of bert_generator encoder model'
)
parser.add_argument(
'--main-encoder-warmup-path', type=str,
default="",
help='path of warmup main encoder model'
)
parser.add_argument(
'--nbest-void-insert-ratio', type=float,
default=0.0,
help='ratio of adding rand <void> into nbest input'
)
parser.add_argument(
'--nbest-input-num', type=int,
default=1,
help='number of nbest input'
)
parser.add_argument(
'--nbest-input-sample-temp', type=float,
default=1.0,
help='temperature of sampling nbest input'
)
parser.add_argument(
'--nbest-input-sample-untouch-temp', type=float,
default=-1.0,
help='temperature of sampling nbest input if the source toke is unnoised'
)
parser.add_argument(
'--encoder-training-type', type=str,
default="detector",
help='loss type of encoder training'
)
parser.add_argument(
'--force-same-ratio', type=float,
default=0.0,
help='nbest ratio that is forced to same'
)
parser.add_argument(
'--emb-dropout', type=float, default=-1.0,
help='dropout of encoder embedding dropout'
)
parser.add_argument(
'--same-also-sample', type=float, default=0.0,
help='same also sample'
)
# fmt: on
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
self.datasets[split] = load_langpair_dataset(
data_path,
split,
src,
self.src_dict,
tgt,
self.tgt_dict,
combine=combine,
dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
prepend_bos=True,
cal_wer_dur=self.args.cal_wer_dur,
src_with_werdur=self.args.src_with_werdur,
append_eos_to_target=self.args.cal_wer_dur , # add this although eos already add in data preprocess
homophone_dict_path=self.args.homophone_dict_path,
mask_ratio=self.args.mask_ratio,
detector_mask_ratio=self.args.detector_mask_ratio,
error_distribution=(None if not self.args.error_distribution else [float(i) for i in self.args.error_distribution.split(",")]),
ft_error_distribution=(None if not self.args.ft_error_distribution else [float(i) for i in self.args.ft_error_distribution.split(",")]),
duptoken_error_distribution=(None if not self.args.duptoken_error_distribution else [float(i) for i in self.args.duptoken_error_distribution.split(",")]),
)
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
args.left_pad_source = utils.eval_bool(args.left_pad_source)
args.left_pad_target = utils.eval_bool(args.left_pad_target)
paths = utils.split_paths(args.data)
assert len(paths) > 0
# find language pair automatically
if args.source_lang is None or args.target_lang is None:
args.source_lang, args.target_lang = data_utils.infer_language_pair(
paths[0]
)
if args.source_lang is None or args.target_lang is None:
raise Exception(
"Could not infer language pair, please provide it explicitly"
)
# load dictionaries
src_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(args.source_lang)), pad_first=args.pad_first_dictionary
)
tgt_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(args.target_lang)), pad_first=args.pad_first_dictionary
)
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
logger.info("[{}] dictionary: {} types".format(args.source_lang, len(src_dict)))
logger.info("[{}] dictionary: {} types".format(args.target_lang, len(tgt_dict)))
return cls(args, src_dict, tgt_dict)
@classmethod
def load_dictionary(cls, filename, pad_first=False):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
return Dictionary_sc.load(filename, pad_first=pad_first)
@classmethod
def build_dictionary(
cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8
):
"""Build the dictionary
Args:
filenames (list): list of filenames
workers (int): number of concurrent workers
threshold (int): defines the minimum word count
nwords (int): defines the total number of words in the final dictionary,
including special symbols
padding_factor (int): can be used to pad the dictionary size to be a
multiple of 8, which is important on some hardware (e.g., Nvidia
Tensor Cores).
"""
d = Dictionary_sc()
for filename in filenames:
Dictionary_sc.add_file_to_dictionary(
filename, d, tokenizer.tokenize_line, workers
)
d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
return d
def inject_noise(self, target_tokens):
def _random_delete(target_tokens):
pad = self.tgt_dict.pad()
bos = self.tgt_dict.bos()
eos = self.tgt_dict.eos()
max_len = target_tokens.size(1)
target_mask = target_tokens.eq(pad)
target_score = target_tokens.clone().float().uniform_()
target_score.masked_fill_(
target_tokens.eq(bos) | target_tokens.eq(eos), 0.0
)
target_score.masked_fill_(target_mask, 1)
target_score, target_rank = target_score.sort(1)
target_length = target_mask.size(1) - target_mask.float().sum(
1, keepdim=True
)
# do not delete <bos> and <eos> (we assign 0 score for them)
target_cutoff = (
2
+ (
(target_length - 2)
* target_score.new_zeros(target_score.size(0), 1).uniform_()
).long()
)
target_cutoff = target_score.sort(1)[1] >= target_cutoff
prev_target_tokens = (
target_tokens.gather(1, target_rank)
.masked_fill_(target_cutoff, pad)
.gather(1, target_rank.masked_fill_(target_cutoff, max_len).sort(1)[1])
)
prev_target_tokens = prev_target_tokens[
:, : prev_target_tokens.ne(pad).sum(1).max()
]
return prev_target_tokens
def _random_mask(target_tokens):
pad = self.tgt_dict.pad()
bos = self.tgt_dict.bos()
eos = self.tgt_dict.eos()
unk = self.tgt_dict.unk()
target_masks = (
target_tokens.ne(pad) & target_tokens.ne(bos) & target_tokens.ne(eos)
)
target_score = target_tokens.clone().float().uniform_()
target_score.masked_fill_(~target_masks, 2.0)
target_length = target_masks.sum(1).float()
target_length = target_length * target_length.clone().uniform_()
target_length = target_length + 1 # make sure to mask at least one token.
_, target_rank = target_score.sort(1)
target_cutoff = new_arange(target_rank) < target_length[:, None].long()
prev_target_tokens = target_tokens.masked_fill(
target_cutoff.scatter(1, target_rank, target_cutoff), unk
)
return prev_target_tokens
def _full_mask(target_tokens):
pad = self.tgt_dict.pad()
bos = self.tgt_dict.bos()
eos = self.tgt_dict.eos()
unk = self.tgt_dict.unk()
target_mask = (
target_tokens.eq(bos) | target_tokens.eq(eos) | target_tokens.eq(pad)
)
return target_tokens.masked_fill(~target_mask, unk)
if self.args.noise == "random_delete":
return _random_delete(target_tokens)
elif self.args.noise == "random_mask":
return _random_mask(target_tokens)
elif self.args.noise == "full_mask":
return _full_mask(target_tokens)
elif self.args.noise == "no_noise":
return target_tokens
else:
raise NotImplementedError
def build_generator(self, models, args, **unused):
# add models input to match the API for SequenceGenerator
from softcorrect_corrector_generator import SoftcorrectCorrectorGenerator
# print("edit_thre:", getattr(args, "edit_thre", 0.0))
return SoftcorrectCorrectorGenerator(
self.target_dictionary,
eos_penalty=getattr(args, "iter_decode_eos_penalty", 0.0),
max_iter=getattr(args, "iter_decode_max_iter", 10),
beam_size=getattr(args, "iter_decode_with_beam", 1),
reranking=getattr(args, "iter_decode_with_external_reranker", False),
decoding_format=getattr(args, "decoding_format", None),
adaptive=not getattr(args, "iter_decode_force_max_iter", False),
retain_history=getattr(args, "retain_iter_history", False),
edit_thre=getattr(args, "edit_thre", 0.0),
print_werdur=getattr(args, "print_werdur", False),
retain_dropout=getattr(args, "retain_dropout", False)
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None, nbest_infer=0, force_mask_type=None, duptoken_error_distribution=None):
if constraints is not None:
# Though see Susanto et al. (ACL 2020): https://www.aclweb.org/anthology/2020.acl-main.325/
raise NotImplementedError(
"Constrained decoding with the translation_lev task is not supported"
)
return LanguagePairDataset(
src_tokens, src_lengths, self.source_dictionary, append_bos=True, homophone_dict_path="", nbest_infer=nbest_infer, force_mask_type=force_mask_type, duptoken_error_distribution=duptoken_error_distribution
)
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
model.train()
sample["prev_target"] = self.inject_noise(sample["target"])
loss, sample_size, logging_output = criterion(model, sample)
if ignore_grad:
loss *= 0
optimizer.backward(loss)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
sample["prev_target"] = self.inject_noise(sample["target"])
loss, sample_size, logging_output = criterion(model, sample)
return loss, sample_size, logging_output
def inference_step(
self, generator, models, sample, prefix_tokens=None, constraints=None, werdur_gt_str="", force_mask_type=None, duptoken_error_distribution=None
):
with torch.no_grad():
return generator.generate(
models, sample, prefix_tokens=prefix_tokens, constraints=constraints, werdur_gt_str=werdur_gt_str, force_mask_type=force_mask_type
) # can use try except to prevent paramter error
| 21,359 | 36.408056 | 216 | py |
NeuralSpeech | NeuralSpeech-master/SoftCorrect/softcorrect/softcorrect_model.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.iterative_refinement_generator import DecoderOut
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import FairseqNATDecoder, FairseqNATModel, ensemble_encoder, ensemble_decoder, FairseqNATEncoder
from fairseq.models.transformer import Embedding, TransformerModel, TransformerEncoder, TransformerDecoder
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.modules import FairseqDropout
from fairseq.modules import PositionalEmbedding
from fairseq.models.fairseq_model import BaseFairseqModel
from torch import Tensor
from fairseq.models import FairseqDecoder, FairseqEncoder
from torch import nn
import math
from fairseq.models.fairseq_encoder import EncoderOut
from typing import Any, Dict, List, Optional, Tuple
import logging
logger = logging.getLogger(__name__)
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
def Embeddingright(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
if padding_idx is not None:
nn.init.constant_(m.weight[padding_idx], 0)
return m
@register_model("softcorrect_corrector")
class SoftcorrectCorrectorModel(BaseFairseqModel):
def __init__(self, args, encoder, decoder):
super().__init__()
self.encoder = encoder
self.decoder = decoder
assert isinstance(self.encoder, FairseqEncoder)
assert isinstance(self.decoder, FairseqDecoder) or self.decoder is None
self.args = args
self.supports_align_args = True
self.tgt_dict = decoder.dictionary if decoder else None
if not decoder:
print("Decoder not exit! Using bos eos from encoder!")
self.bos = encoder.dictionary.bos()
self.eos = encoder.dictionary.eos()
self.pad = encoder.dictionary.pad()
self.unk = encoder.dictionary.unk()
try:
self.gttoken = encoder.dictionary.gttoken()
except:
self.gttoken = None
else:
self.bos = decoder.dictionary.bos()
self.eos = decoder.dictionary.eos()
self.pad = decoder.dictionary.pad()
self.unk = decoder.dictionary.unk()
try:
self.gttoken = decoder.dictionary.gttoken()
except:
self.gttoken = None
self.ensemble_models = None
if getattr(args, 'remove_edit_emb', False):
print("Remove edit emb!")
self.remove_edit_emb = True
else:
self.remove_edit_emb = False
self.to_be_edited_size = getattr(args, "to_be_edited_size", 1)
self.padding_idx = self.encoder.padding_idx
if getattr(args, 'assist_edit_loss', False):
print("add assist edit loss!")
self.assist_edit_loss = True
else:
self.assist_edit_loss = False
self.werdur_max_predict = getattr(args, 'werdur_max_predict', 5.0)
print("werdur_max_predict: ", self.werdur_max_predict)
self.werdur_loss_type = getattr(args, 'werdur_loss_type', 'l2')
print("werdur_loss_type: ", self.werdur_loss_type)
if self.werdur_loss_type == 'l2':
self.werdur_loss_func = F.mse_loss
elif self.werdur_loss_type == 'log_l2':
self.werdur_loss_func = self.log_mse_loss
elif self.werdur_loss_type == 'l1':
self.werdur_loss_func = F.l1_loss
elif self.werdur_loss_type == 'log_l1':
self.werdur_loss_func = self.log_l1_loss
else:
raise ValueError("Unsupported werdur_loss_type")
self.encoder_embed_dim = args.encoder_embed_dim
self.source_dup_factor = getattr(args, "source_dup_factor", -1)
self.mask_ratio = getattr(args, "mask_ratio", 0.0)
self.detector_mask_ratio = getattr(args, "detector_mask_ratio", 0.0)
self.ft_error_distribution = getattr(args, "ft_error_distribution", None)
self.duptoken_error_distribution = getattr(args, "duptoken_error_distribution", None)
if self.ft_error_distribution:
assert not self.detector_mask_ratio
assert not self.mask_ratio
if self.mask_ratio != 0.0 or self.ft_error_distribution:
self.output_projection = torch.nn.Linear(
self.encoder_embed_dim, len(self.encoder.dictionary), bias=False
)
torch.nn.init.normal_(
self.output_projection.weight, mean=0, std=self.encoder_embed_dim ** -0.5
)
else:
self.output_projection = None
if self.detector_mask_ratio != 0.0:
self.detector_projection = torch.nn.Sequential(
torch.nn.Linear(self.encoder_embed_dim * 2, self.encoder_embed_dim * 2),
torch.nn.LayerNorm(self.encoder_embed_dim * 2),
torch.nn.GLU(),
torch.nn.Linear(self.encoder_embed_dim, self.encoder_embed_dim * 2),
torch.nn.LayerNorm(self.encoder_embed_dim * 2),
torch.nn.GLU(),
torch.nn.Linear(self.encoder_embed_dim, 1),
)
else:
self.detector_projection = None
# self.phone_embedding = None
self.untouch_token_loss = getattr(args, "untouch_token_loss", 0.0)
@property
def allow_ensemble(self):
return True
def enable_ensemble(self, models):
self.encoder.ensemble_models = [m.encoder for m in models]
self.decoder.ensemble_models = [m.decoder for m in models]
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
encoder = SoftCorrectEncoder(args, src_dict, embed_tokens)
if getattr(args, "apply_bert_init", False):
encoder.apply(init_bert_params)
return encoder
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
**kwargs,
):
"""
Load a :class:`~fairseq.models.FairseqModel` from a pre-trained model
file. Downloads and caches the pre-trained model file if needed.
The base implementation returns a
:class:`~fairseq.hub_utils.GeneratorHubInterface`, which can be used to
generate translations or sample from language models. The underlying
:class:`~fairseq.models.FairseqModel` can be accessed via the
*generator.models* attribute.
Other models may override this to implement custom hub interfaces.
Args:
model_name_or_path (str): either the name of a pre-trained model to
load or a path/URL to a pre-trained model state dict
checkpoint_file (str, optional): colon-separated list of checkpoint
files in the model archive to ensemble (default: 'model.pt')
data_name_or_path (str, optional): point args.data to the archive
at the given path/URL. Can start with '.' or './' to reuse the
model archive path.
"""
import hub_utils_fc
x = hub_utils_fc.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
**kwargs,
)
cls.upgrade_args(x["args"])
logger.info(x["args"])
return hub_utils_fc.GeneratorHubInterface(x["args"], x["task"], x["models"])
@torch.jit.export
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if args.encoder_layers_to_keep:
args.encoder_layers = len(args.encoder_layers_to_keep.split(","))
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
if getattr(args, "max_source_positions", None) is None:
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError("--share-all-embeddings requires a joined dictionary")
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
)
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path
):
raise ValueError(
"--share-all-embeddings not compatible with --decoder-embed-path"
)
encoder_embed_tokens = cls.build_embedding(
args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = cls.build_embedding(
args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = cls.build_embedding(
args, tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
return cls(args, encoder, decoder)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
def extract_features(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
"""
Similar to *forward* but only return features.
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
features = self.decoder.extract_features(
prev_output_tokens, encoder_out=encoder_out, **kwargs
)
return features
def output_layer(self, features, **kwargs):
"""Project features to the default output size (typically vocabulary size)."""
return self.decoder.output_layer(features, **kwargs)
def max_positions(self):
"""Maximum length supported by the model."""
if not self.decoder:
return (self.encoder.max_positions(), self.encoder.max_positions())
else:
return (self.encoder.max_positions(), self.decoder.max_positions())
def max_decoder_positions(self):
"""Maximum length supported by the decoder."""
return self.decoder.max_positions()
def log_mse_loss(self, hypo, ref, reduction='none'):
hypo = torch.exp(hypo) - 1.0
return F.mse_loss(hypo, ref, reduction=reduction)
def clipped_mse_loss(self, hypo, ref, reduction='none'):
assert reduction == 'none'
mse_loss = F.mse_loss(hypo, ref, reduction=reduction)
mse_loss_pad = (mse_loss > self.clip_loss_thre ** 2).type_as(hypo).detach()
return mse_loss * mse_loss_pad
def log_l1_loss(self, hypo, ref, reduction='none'):
hypo = torch.exp(hypo) - 1.0
return F.l1_loss(hypo, ref, reduction=reduction)
@property
def allow_length_beam(self):
return True
@staticmethod
def add_args(parser):
#TransformerModel.add_args(parser)
FairseqNATModel.add_args(parser)
# length prediction
parser.add_argument(
"--src-embedding-copy",
action="store_true",
help="copy encoder word embeddings as the initial input of the decoder",
)
parser.add_argument(
"--src-embedding-copy-exp",
action="store_true",
help="copy encoder word embeddings as the initial input of the decoder in exponontial way",
)
parser.add_argument(
"--remove-edit-emb",
action="store_true",
default=False,
help="whether to remove edit emb",
)
parser.add_argument(
"--assist-edit-loss",
action="store_true",
default=False,
help="whether to use assist edit loss",
)
parser.add_argument(
"--pred-length-offset",
action="store_true",
help="predicting the length difference between the target and source sentences",
)
parser.add_argument(
"--sg-length-pred",
action="store_true",
help="stop the gradients back-propagated from the length predictor",
)
parser.add_argument(
"--length-loss-factor",
type=float,
help="weights on the length prediction loss",
)
parser.add_argument(
"--edit-emb-dim",
type=int,
help="dimension of edit emb",
)
parser.add_argument(
"--to-be-edited-size",
type=int,
help="size of to be edited (2 for edited or not, 4 or insert/delete/change/not do",
)
parser.add_argument(
"--werdur-max-predict",
type=float,
help="dimension of edit emb",
)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
# if getattr(args, "mask_ratio", 0.0) != 0.0 or getattr(args, "detector_mask_ratio", 0.0) != 0.0 or getattr(args, "ft_error_distribution", None) is not None:
return None
def _compute_nll_loss(
self, outputs, targets, masks=None, label_smoothing=0.0, name="loss", factor=1.0, skip_cloest=False, return_acc=False
):
"""
outputs: batch x len x d_model
targets: batch x len
masks: batch x len
policy_logprob: if there is some policy
depends on the likelihood score as rewards.
"""
def mean_ds(x: Tensor, dim=None) -> Tensor:
return (
x.float().mean().type_as(x)
if dim is None
else x.float().mean(dim).type_as(x)
)
nll_loss_closest = None
if masks is not None:
outputs, targets = outputs[masks], targets[masks]
if masks is not None and not masks.any():
nll_loss = torch.tensor(0)
loss = nll_loss
if return_acc:
acc = torch.tensor(0.0)
else:
acc = None
else:
logits = F.log_softmax(outputs, dim=-1)
if targets.dim() == 1:
losses = F.nll_loss(logits, targets.to(logits.device), reduction="none")
if return_acc:
acc = (logits.max(-1)[1] == targets).float().mean()
else:
acc = None
else: # soft-labels
losses = F.kl_div(logits, targets.to(logits.device), reduction="none")
losses = losses.sum(-1)
assert not return_acc
#nll_loss_closest = losses.float().type_as(losses).detach()
nll_loss = mean_ds(losses)
if label_smoothing > 0:
loss = (
nll_loss * (1 - label_smoothing) - mean_ds(logits) * label_smoothing
)
else:
loss = nll_loss
loss = loss * factor
return {"name": name, "loss": loss, "nll_loss": nll_loss, "factor": factor, "acc": acc}, nll_loss_closest
def _compute_binary_loss(
self, outputs, targets, masks=None, label_smoothing=0.0, name="loss", factor=1.0,
return_acc=False
):
"""
outputs: batch x len
targets: batch x len
masks: batch x len
policy_logprob: if there is some policy
depends on the likelihood score as rewards.
"""
def mean_ds(x: Tensor, dim=None) -> Tensor:
return (
x.float().mean().type_as(x)
if dim is None
else x.float().mean(dim).type_as(x)
)
if masks is not None:
outputs, targets = outputs[masks], targets[masks]
if masks is not None and not masks.any():
nll_loss = torch.tensor(0)
loss = nll_loss
if return_acc:
acc = torch.tensor(0.0)
else:
acc = None
else:
losses = F.binary_cross_entropy_with_logits(outputs, targets.to(outputs.device).type_as(outputs), reduction="none")
if return_acc:
acc = ((outputs >= 0).long() == targets).float().mean()
else:
acc = None
# nll_loss_closest = losses.float().type_as(losses).detach()
nll_loss = mean_ds(losses)
loss = nll_loss
loss = loss * factor
return {"name": name, "loss": loss, "nll_loss": nll_loss, "factor": factor, "acc": acc}
def _compute_ctc_loss(
self, outputs, output_masks, targets, masks, blank, name="loss", factor=1.0
):
"""
outputs: batch x len x d_model
targets: batch x len
masks: batch x len
policy_logprob: if there is some policy
depends on the likelihood score as rewards.
"""
outputs_len = output_masks.sum(-1)
targets_len = masks.sum(-1)
if masks is not None and not masks.any():
nll_loss = torch.tensor(0)
loss = nll_loss
else:
logits = F.log_softmax(outputs.float(), dim=-1)
#logits = F.log_softmax(outputs, dim=-1)
logits = logits.transpose(0, 1)
with torch.backends.cudnn.flags(enabled=False):
loss = torch.nn.functional.ctc_loss(logits, targets.to(logits.device), outputs_len, targets_len, blank=blank, reduction='mean', zero_infinity=True)
loss = loss * factor
#loss = loss.type_as(outputs)
return {"name": name, "loss": loss, "nll_loss": torch.Tensor([0.0])[0].to(logits.device), "factor": factor}, torch.Tensor([0.0])[0].to(logits.device)
def forward_encoder(self, encoder_inputs):
if len(encoder_inputs) == 3:
src_tokens, src_lengths, mask_type = encoder_inputs
else:
src_tokens, src_lengths = encoder_inputs
mask_type = None
attn_mask = None
if mask_type is not None:
source_nonpadding = (src_tokens != self.pad)
source_token = src_tokens * (source_nonpadding.long())
source_embedding = (self.encoder.embed_tokens(source_token) * (source_token != 0).long()[:, :, None])
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, token_embeddings=source_embedding)
if self.detector_projection is not None:
detector_out = self.detector_projection(
torch.cat([encoder_out.encoder_out.transpose(0, 1), encoder_out.encoder_embedding], dim=-1)).squeeze(-1)
return detector_out
else:
encoder_out = self.output_projection(encoder_out.encoder_out.transpose(0, 1))
return torch.log_softmax(encoder_out, dim=-1)
else:
phone_feat = None
return self.encoder(src_tokens, src_lengths=src_lengths, phone_feat=phone_feat)
def forward_mlm(self, src_tokens, src_lengths, tgt_tokens, tgt_tokens_full, tgt_tokens_ctc, mask_type, attn_mask):
assert mask_type is not None
source_nonpadding = (src_tokens != self.pad)
untouched_token = (mask_type == 0) * source_nonpadding
identity_token = (mask_type == 1) * source_nonpadding
mask_token = (mask_type == 2) * source_nonpadding
homophone_token = (mask_type == 3) * source_nonpadding
ctc_token = (mask_type == 4) * source_nonpadding # reserve for future work
random_token = (mask_type == 5) * source_nonpadding
need_detect = (untouched_token + identity_token + homophone_token + random_token).bool()
detect_label = (random_token + homophone_token).bool().long()
source_token = src_tokens * (source_nonpadding.long())
source_embedding = (self.encoder.embed_tokens(source_token) * (source_token != 0).long()[:, :, None])
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, token_embeddings=source_embedding)
if self.detector_projection is not None:
detector_out = self.detector_projection(torch.cat([encoder_out.encoder_out.transpose(0, 1), encoder_out.encoder_embedding], dim=-1)).squeeze(-1)
detector_loss = self._compute_binary_loss(
detector_out,
detect_label,
need_detect,
self.args.label_smoothing,
name="Binary-loss",
factor=1.0,
return_acc=True
)
else:
detector_loss = None
encoder_out = self.output_projection(encoder_out.encoder_out.transpose(0, 1))
ctc_gt_part = torch.zeros_like(encoder_out) + torch.nn.functional.one_hot(tgt_tokens_full, num_classes=encoder_out.shape[-1]) * 30
encoder_out_ctc = encoder_out * ctc_token[:, :, None] + ctc_gt_part.detach() * ~ctc_token[:, :, None]
result_dict = {}
# prepare ctc probability!!!!!!!!!!!!
if self.ft_error_distribution is not None or (self.mask_ratio != 0.0 and self.detector_mask_ratio == 0.0): #bert generator has no ctc loss
result_dict["cons_ctc_loss"], _ = self._compute_ctc_loss(
encoder_out_ctc,
src_tokens.ne(0),
tgt_tokens_ctc,
tgt_tokens_ctc.ne(self.pad),
blank=0,
name="ConsCTC" + "-loss",
factor=1.0,
)
result_dict["identity_loss"], _ = self._compute_nll_loss(
encoder_out,
tgt_tokens_full,
identity_token,
self.args.label_smoothing,
name="Id-loss",
factor=1.0,
skip_cloest=True,
return_acc=True
)
result_dict["mask_loss"], _ = self._compute_nll_loss(
encoder_out,
tgt_tokens_full,
mask_token,
self.args.label_smoothing,
name="Mask-loss",
factor=1.0,
skip_cloest=True,
return_acc=True
)
result_dict["homophone_loss"], _ = self._compute_nll_loss(
encoder_out,
tgt_tokens_full,
homophone_token.bool(),
self.args.label_smoothing,
name="Homophone-loss",
factor=1.0,
skip_cloest=True,
return_acc=True
)
result_dict["random_loss"], _ = self._compute_nll_loss(
encoder_out,
tgt_tokens_full,
random_token.bool(),
self.args.label_smoothing,
name="Random-loss",
factor=1.0,
skip_cloest=True,
return_acc=True
)
if self.untouch_token_loss > 0.0:
result_dict["untouch_loss"], _ = self._compute_nll_loss(
encoder_out,
tgt_tokens_full,
untouched_token.bool(),
self.args.label_smoothing,
name="Untouch-loss",
factor=self.untouch_token_loss,
skip_cloest=True,
return_acc=True
)
if detector_loss is not None:
result_dict["detector_loss"] = detector_loss
return result_dict
def forward(
self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, wer_dur=None, to_be_edited=None, for_wer_gather=None,
closest_label=None, source_phone=None, mask_type=None, target_full=None, target_ctc=None, **kwargs
):
# encoding
attn_mask = None
return self.forward_mlm(src_tokens, src_lengths, tgt_tokens, mask_type=mask_type, tgt_tokens_full=target_full, tgt_tokens_ctc=target_ctc, attn_mask=attn_mask)
def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, source_phone=None, **kwargs):
step = decoder_out.step
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
history = decoder_out.history
# execute the decoder
output_masks = output_tokens.ne(self.pad).bool()
assert output_tokens[0].ne(self.pad).long().sum() == output_tokens.shape[1]
_tokens = self.decoder(
normalize=True, #normalize=True
prev_output_tokens=output_tokens,
encoder_out=encoder_out,
step=step,
source_phone=source_phone
)
if history is not None:
history.append(output_tokens.clone())
return decoder_out._replace(
output_tokens=_tokens,
output_scores=None,
attn=None,
history=history,
)
def regenerate_length_beam(self, decoder_out, beam_size):
output_tokens = decoder_out.output_tokens
length_tgt = output_tokens.ne(self.pad).sum(1)
length_tgt = (
length_tgt[:, None]
+ utils.new_arange(length_tgt, 1, beam_size)
- beam_size // 2
)
length_tgt = length_tgt.view(-1).clamp_(min=2)
max_length = length_tgt.max()
idx_length = utils.new_arange(length_tgt, max_length)
initial_output_tokens = output_tokens.new_zeros(
length_tgt.size(0), max_length
).fill_(self.pad)
initial_output_tokens.masked_fill_(
idx_length[None, :] < length_tgt[:, None], self.unk
)
initial_output_tokens[:, 0] = self.bos
initial_output_tokens.scatter_(1, length_tgt[:, None] - 1, self.eos)
initial_output_scores = initial_output_tokens.new_zeros(
*initial_output_tokens.size()
).type_as(decoder_out.output_scores)
return decoder_out._replace(
output_tokens=initial_output_tokens, output_scores=initial_output_scores
)
class LayerNorm(torch.nn.LayerNorm):
"""Layer normalization module.
:param int nout: output dim size
:param int dim: dimension to be normalized
"""
def __init__(self, nout, dim=-1, eps=1e-12):
"""Construct an LayerNorm object."""
super(LayerNorm, self).__init__(nout, eps=eps)
self.dim = dim
def forward(self, x):
"""Apply layer normalization.
:param torch.Tensor x: input tensor
:return: layer normalized tensor
:rtype torch.Tensor
"""
if self.dim == -1:
return super(LayerNorm, self).forward(x)
return super(LayerNorm, self).forward(x.transpose(1, -1)).transpose(1, -1)
class SoftCorrectEncoder(TransformerEncoder):
def __init__(self, args, dictionary, embed_tokens):
super().__init__(args, dictionary, embed_tokens)
if getattr(args, "emb_dropout", -1) != -1:
print("Setting word embedding dropout of encoder to {}".format(args.emb_dropout))
self.dropout_module = FairseqDropout(
args.emb_dropout, module_name=self.__class__.__name__
)
else:
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.ensemble_models = None
embed_dim = embed_tokens.embedding_dim
self.pos_before_reshape = getattr(args, "pos_before_reshape", False)
self.nbest_input_num = getattr(args, "nbest_input_num", 1)
if self.nbest_input_num != 1:
self.embed_positions = (
PositionalEmbedding(
args.max_source_positions,
embed_dim * self.nbest_input_num if self.pos_before_reshape else embed_dim,
self.padding_idx,
learned=args.encoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
else:
self.embed_positions = (
PositionalEmbedding(
args.max_source_positions,
embed_dim,
self.padding_idx,
learned=args.encoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
if self.nbest_input_num != 1:
self.nbest_reshape = nn.Linear(args.nbest_input_num * embed_dim, embed_dim, bias=False)
else:
self.nbest_reshape = None
@ensemble_encoder
def forward(
self,
src_tokens,
src_lengths,
attn_mask=None,
phone_feat=None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
namedtuple:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings, phone_feat=phone_feat)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# compute padding mask
if len(src_tokens.shape) == 3:
encoder_padding_mask = src_tokens[:, :, 0].eq(self.padding_idx)
else:
encoder_padding_mask = src_tokens.eq(self.padding_idx)
encoder_states = [] if return_all_hiddens else None
# encoder layers
for layer in self.layers:
x = layer(x, encoder_padding_mask, attn_mask=attn_mask)
if return_all_hiddens:
assert encoder_states is not None
encoder_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
return EncoderOut(
encoder_out=x, # T x B x C
encoder_padding_mask=encoder_padding_mask, # B x T
encoder_embedding=encoder_embedding, # B x T x C
encoder_states=encoder_states, # List[T x B x C]
src_tokens=None,
src_lengths=None,
)
def forward_embedding(
self, src_tokens, token_embedding: Optional[torch.Tensor] = None, phone_feat: Optional[torch.Tensor] = None,
):
# embed tokens and positions
if token_embedding is None:
token_embedding = self.embed_tokens(src_tokens)
x = embed = self.embed_scale * token_embedding
if len(src_tokens.shape) == 2:
if self.embed_positions is not None:
x = embed + self.embed_positions(src_tokens)
else:
if len(token_embedding.shape) == 3:
x = embed + self.embed_positions(src_tokens[:, :, 0])
else:
assert self.nbest_reshape is not None
if self.pos_before_reshape:
if self.embed_positions is not None:
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3])
x = x + self.embed_positions(src_tokens[:, :, 0])
x = self.nbest_reshape(x)
else:
x = self.nbest_reshape(x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]))
if self.embed_positions is not None:
x = x + self.embed_positions(src_tokens[:, :, 0])
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
if self.quant_noise is not None:
x = self.quant_noise(x)
return x, embed
@register_model_architecture(
"softcorrect_corrector", "softcorrect_corrector"
)
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.apply_bert_init = getattr(args, "apply_bert_init", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
# --- special arguments ---
args.sg_length_pred = getattr(args, "sg_length_pred", False)
args.pred_length_offset = getattr(args, "pred_length_offset", False)
args.length_loss_factor = getattr(args, "length_loss_factor", 0.1)
args.src_embedding_copy = getattr(args, "src_embedding_copy", False)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
class Swish(torch.nn.Module):
"""Construct an Swish object."""
def forward(self, x):
"""Return Swich activation function."""
return x * torch.sigmoid(x)
@register_model("softcorrect_detector")
class SoftcorrectDetectorModel(SoftcorrectCorrectorModel):
def __init__(self, args, encoder, decoder):
super().__init__(args, encoder, decoder)
self.candidate_size = getattr(args, "candidate_size", 0)
self.label_leak_prob = getattr(args, "label_leak_prob", 0.0)
self.label_ignore_prob = getattr(args, "label_ignore_prob", 0.0)
self.output_projection = None
if self.detector_mask_ratio != 0.0 and self.candidate_size == 0:
self.detector_projection = torch.nn.Sequential(
torch.nn.Linear(self.encoder_embed_dim * 2, self.encoder_embed_dim * 2),
torch.nn.LayerNorm(self.encoder_embed_dim * 2),
torch.nn.GLU(),
torch.nn.Linear(self.encoder_embed_dim, self.encoder_embed_dim * 2),
torch.nn.LayerNorm(self.encoder_embed_dim * 2),
torch.nn.GLU(),
torch.nn.Linear(self.encoder_embed_dim, 1),
)
else:
self.detector_projection = None
self.encoder_training_type = getattr(args, "encoder_training_type", "detector")
self.emb_dropout = getattr(args, "emb_dropout", -1)
if self.candidate_size == -1:
right_pos_before_reshape = args.pos_before_reshape
args.pos_before_reshape = False
layernorm_embedding = getattr(args, "layernorm_embedding", False)
encoder_layers = args.encoder_layers
args.encoder_layers = 12
args.layernorm_embedding = False
args.emb_dropout = -1 # saved in self.emb_dropout
self.bert_generator_encoder = self.build_encoder(args=args, src_dict=self.encoder.dictionary, embed_tokens=Embedding(len(self.encoder.dictionary), self.encoder.embed_tokens.weight.shape[1], self.encoder.dictionary.pad()))
self.bert_generator_output_projection = torch.nn.Linear(
self.encoder_embed_dim, len(self.encoder.dictionary), bias=False
)
for param in self.bert_generator_encoder.parameters():
param.requires_grad = False
for param in self.bert_generator_output_projection.parameters():
param.requires_grad = False
args.encoder_layers = encoder_layers
args.pos_before_reshape = right_pos_before_reshape
args.emb_dropout = self.emb_dropout
args.layernorm_embedding = layernorm_embedding
else:
assert self.candidate_size == 0, "candidate_size must in [-1, 0]"
self.bert_generator_encoder = None
self.bert_generator_output_projection = None
self.nbest_input_num = getattr(args, "nbest_input_num", 1)
self.nbest_void_insert_ratio = getattr(args, "nbest_void_insert_ratio", 0.0)
self.nbest_input_sample_temp = getattr(args, "nbest_input_sample_temp", 1.0)
self.nbest_input_sample_untouch_temp = getattr(args, "nbest_input_sample_untouch_temp", -1.0)
# if self.nbest_input_sample_untouch_temp:
# print("Sampling temperature of touch/untouch is {}/{}!".format(self.nbest_input_sample_temp, self.nbest_input_sample_untouch_temp))
self.force_same_ratio = getattr(args, "force_same_ratio", 0.0)
assert self.encoder.dictionary.gttoken() + 1 == len(self.encoder.dictionary), "gttoken log require this"
self.same_also_sample = getattr(args, "same_also_sample", 0.0)
if self.same_also_sample == 0.0:
self.same_also_sample = False
def cal_acc(
self, flags, masks=None, name="loss",
):
if masks is not None:
flags = flags[masks]
if masks is not None and not masks.any():
acc = torch.tensor(0.0)
else:
acc = flags.float().mean()
return {"name": name, "loss": torch.tensor(0), "nll_loss": torch.tensor(0), "factor": 1.0,
"acc": acc, "log_acc_only": True}
def _compute_nll_loss(
self, outputs, targets, masks=None, label_smoothing=0.0, name="loss", factor=1.0, skip_cloest=False,
return_acc=False, log_acc_only=False, input_is_label=False, ignore_gttoken=False
):
"""
outputs: batch x len x d_model
targets: batch x len
masks: batch x len
policy_logprob: if there is some policy
depends on the likelihood score as rewards.
"""
if ignore_gttoken:
assert not input_is_label
# if input_is_label:
# assert not ignore_gttoken
def mean_ds(x: Tensor, dim=None) -> Tensor:
return (
x.float().mean().type_as(x)
if dim is None
else x.float().mean(dim).type_as(x)
)
if not skip_cloest:
logits_clo = F.log_softmax(outputs, dim=-1)
losses_clo = F.nll_loss(logits_clo.transpose(1,2), targets.to(outputs.device), reduction="none")
masks_clo = masks.float()
losses_clo = (losses_clo * masks_clo).sum(-1) / masks_clo.sum(-1)
nll_loss_closest = losses_clo.type_as(outputs).detach()
else:
nll_loss_closest = None
if masks is not None:
outputs, targets = outputs[masks], targets[masks]
if masks is not None and not masks.any():
nll_loss = torch.tensor(0)
loss = nll_loss
if return_acc:
acc = torch.tensor(0.0)
else:
acc = None
else:
if log_acc_only:
assert return_acc
if ignore_gttoken:
acc = (outputs[:, :-1].max(-1)[1] == targets).float().mean()
else:
acc = (outputs.max(-1)[1] == targets).float().mean()
return {"name": name, "loss": torch.tensor(0), "nll_loss": torch.tensor(0), "factor": factor, "acc": acc,
"log_acc_only": True}, nll_loss_closest
else:
logits = F.log_softmax(outputs, dim=-1)
if targets.dim() == 1:
losses = F.nll_loss(logits, targets.to(logits.device), reduction="none")
if return_acc:
acc = (logits.max(-1)[1] == targets).float().mean()
else:
acc = None
else: # soft-labels
losses = F.kl_div(logits, targets.to(logits.device), reduction="none")
losses = losses.sum(-1)
assert not return_acc
nll_loss = mean_ds(losses)
if label_smoothing > 0:
loss = (
nll_loss * (1 - label_smoothing) - mean_ds(logits) * label_smoothing
)
else:
loss = nll_loss
loss = loss * factor
return {"name": name, "loss": loss, "nll_loss": nll_loss, "factor": factor, "acc": acc, "log_acc_only": log_acc_only}, nll_loss_closest
def forward_encoder(self, encoder_inputs):
if len(encoder_inputs) == 4:
src_tokens, src_lengths, mask_type, return_origin = encoder_inputs
elif len(encoder_inputs) == 3:
src_tokens, src_lengths, mask_type = encoder_inputs
return_origin = False
else:
src_tokens, src_lengths = encoder_inputs
mask_type = None
return_origin = False
attn_mask = None
if mask_type is not None:
source_nonpadding = (src_tokens != self.pad)
if self.nbest_input_num != 1:
#assert self.phone_embedding is None
assert (mask_type == 4).long().sum() == 0.0
source_embedding = self.encoder.embed_tokens(src_tokens) * source_nonpadding.long()[:, :, :, None]
else:
dup_token = (mask_type == 4) * source_nonpadding
source_dup = src_tokens * ((mask_type == 4).long() * source_nonpadding.long())
assert source_dup.sum() == 0.0
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, token_embeddings=source_embedding, attn_mask=attn_mask)
if return_origin:
return encoder_out
if self.detector_projection is not None:
detector_out = self.detector_projection(
torch.cat([encoder_out.encoder_out.transpose(0, 1), encoder_out.encoder_embedding], dim=-1)).squeeze(-1)
return detector_out
elif self.output_projection is None:
encoder_out = F.linear(encoder_out.encoder_out.transpose(0, 1), self.encoder.embed_tokens.weight)
return torch.log_softmax(encoder_out, dim=-1)
else:
encoder_out = self.output_projection(encoder_out.encoder_out.transpose(0, 1))
return torch.log_softmax(encoder_out, dim=-1)
return self.encoder(src_tokens, src_lengths=src_lengths)
def forward_bert_and_prepare_tgt(self, src_tokens, src_lengths, tgt_tokens, tgt_tokens_full, tgt_tokens_ctc, mask_type, bert_generator_attn_mask=None, return_input_only=False):
assert mask_type is not None
source_nonpadding = (src_tokens != self.pad)
batch_size, time_length = source_nonpadding.shape
untouched_token = (mask_type == 0) * source_nonpadding
identity_token = (mask_type == 1) * source_nonpadding
mask_token = (mask_type == 2) * source_nonpadding
homophone_token = (mask_type == 3) * source_nonpadding
dup_token = (mask_type == 4) * source_nonpadding
random_token = (mask_type == 5) * source_nonpadding
inserted_token = (mask_type == 6) * source_nonpadding
assert dup_token.long().sum() == 0
source_token = src_tokens * ((mask_type != 4).long() * source_nonpadding.long())
source_embedding = (self.bert_generator_encoder.embed_tokens(source_token) * (source_token != 0).long()[:, :, None])
bert_generator_encoder_out = self.bert_generator_encoder(src_tokens, src_lengths=src_lengths, token_embeddings=source_embedding)
bert_generator_encoder_out = self.bert_generator_output_projection(bert_generator_encoder_out.encoder_out.transpose(0, 1))
bert_generator_encoder_out = bert_generator_encoder_out * source_nonpadding[:, :, None]
bert_generator_encoder_out_raw = bert_generator_encoder_out
bert_generator_acc_dict = {}
bert_generator_acc_dict["bert_generator_identity_loss"], _ = self._compute_nll_loss(
bert_generator_encoder_out_raw,
tgt_tokens_full,
identity_token,
self.args.label_smoothing,
name="BERT_Id-loss",
factor=1.0,
skip_cloest=True,
return_acc=True,
log_acc_only=True,
)
bert_generator_acc_dict["bert_generator_mask_loss"], _ = self._compute_nll_loss(
bert_generator_encoder_out_raw,
tgt_tokens_full,
mask_token,
self.args.label_smoothing,
name="BERT_Mask-loss",
factor=1.0,
skip_cloest=True,
return_acc=True,
log_acc_only=True,
)
bert_generator_acc_dict["bert_generator_homophone_loss"], _ = self._compute_nll_loss(
bert_generator_encoder_out_raw,
tgt_tokens_full,
homophone_token.bool(),
self.args.label_smoothing,
name="BERT_Homo-loss",
factor=1.0,
skip_cloest=True,
return_acc=True,
log_acc_only=True,
)
bert_generator_acc_dict["bert_generator_random_loss"], _ = self._compute_nll_loss(
bert_generator_encoder_out_raw,
tgt_tokens_full,
random_token.bool(),
self.args.label_smoothing,
name="BERT_Rand-loss",
factor=1.0,
skip_cloest=True,
return_acc=True,
log_acc_only=True,
)
bert_generator_acc_dict["bert_generator_untouch_loss"], _ = self._compute_nll_loss(
bert_generator_encoder_out_raw,
tgt_tokens_full,
untouched_token.bool(),
self.args.label_smoothing,
name="BERT_Untouch-loss",
factor=self.untouch_token_loss,
skip_cloest=True,
return_acc=True,
log_acc_only=True,
)
_, bert_generator_encoder_out_label = bert_generator_encoder_out.max(dim=-1)
vocab_size = bert_generator_encoder_out.shape[-1]
if self.nbest_input_num == 1:
final_corrupt_token = bert_generator_encoder_out_label * source_nonpadding
assert self.encoder_training_type != "bert"
token_to_mask_flag = None
else:
final_corrupt_token_same = bert_generator_encoder_out_label[:, :, None].repeat(1, 1, self.nbest_input_num) * source_nonpadding[:, :, None]
void_token_prob = F.one_hot(torch.full((1, 1), self.encoder.dictionary.void(), dtype=src_tokens.dtype).type_as(src_tokens), num_classes=vocab_size)
if self.nbest_input_sample_untouch_temp == -1.0:
adjusted_prob = torch.softmax(bert_generator_encoder_out.float() / self.nbest_input_sample_temp, dim=-1)
elif self.nbest_input_sample_untouch_temp == 0.0:
adjusted_prob_noise = torch.softmax(bert_generator_encoder_out.float() / self.nbest_input_sample_temp, dim=-1)
adjusted_prob_untouch = F.one_hot(bert_generator_encoder_out.max(-1)[1], num_classes=vocab_size).type_as(adjusted_prob_noise)
adjusted_prob = torch.where(
(mask_token + homophone_token + inserted_token + random_token + dup_token)[:, :, None],
adjusted_prob_noise, adjusted_prob_untouch)
else:
adjusted_prob_noise = torch.softmax(bert_generator_encoder_out.float() / self.nbest_input_sample_temp, dim=-1)
adjusted_prob_untouch = torch.softmax(bert_generator_encoder_out.float() / self.nbest_input_sample_untouch_temp, dim=-1)
adjusted_prob = torch.where((mask_token + homophone_token + inserted_token + random_token + dup_token)[:, :, None], adjusted_prob_noise, adjusted_prob_untouch)
if self.same_also_sample:
if self.same_also_sample != -1:
assert self.same_also_sample > 0
forsame_prob = torch.softmax(bert_generator_encoder_out.float() / self.same_also_sample, dim=-1)
final_corrupt_token_same = torch.multinomial(
forsame_prob.reshape([batch_size * time_length, vocab_size]),
1, replacement=True).reshape([batch_size, time_length, 1]).repeat(1, 1, self.nbest_input_num) * source_nonpadding[:, :, None]
else:
final_corrupt_token_same = torch.multinomial(adjusted_prob.reshape([batch_size * time_length, vocab_size]),
1, replacement=True).reshape([batch_size, time_length, 1]).repeat(1, 1, self.nbest_input_num) * source_nonpadding[:, :, None]
void_token_prob = self.nbest_void_insert_ratio * void_token_prob + 2.0 * self.nbest_void_insert_ratio * void_token_prob * inserted_token[:, :, None]
if self.nbest_input_sample_untouch_temp == 0.0:
void_token_prob = void_token_prob * ~(untouched_token + identity_token)[:, :, None]
final_corrupt_token_diff = torch.multinomial(adjusted_prob.reshape([batch_size * time_length, vocab_size]) + void_token_prob.reshape([batch_size * time_length, vocab_size]),
self.nbest_input_num, replacement=True).reshape([batch_size, time_length, self.nbest_input_num])
if self.force_same_ratio == 0.0:
same_diff_var = (mask_token + homophone_token + inserted_token + random_token + dup_token)[:, :, None]
else:
same_diff_var = (torch.rand(final_corrupt_token_diff.shape[0], final_corrupt_token_diff.shape[1]).type_as(bert_generator_encoder_out) < 1.0 - self.force_same_ratio)[:, :, None] * source_nonpadding[:, :, None]
final_corrupt_token = torch.where(same_diff_var, final_corrupt_token_diff, final_corrupt_token_same)
if self.encoder_training_type == "bert":
token_to_mask_flag = (torch.rand(final_corrupt_token.shape[0], final_corrupt_token.shape[1]).type_as(bert_generator_encoder_out) < self.mask_ratio / 2) * source_nonpadding
final_corrupt_token = torch.where(
token_to_mask_flag.bool()[:, :, None],
final_corrupt_token.new_zeros(final_corrupt_token.shape).fill_(self.encoder.dictionary.mask()),
final_corrupt_token
)
else:
token_to_mask_flag = None
if return_input_only:
return final_corrupt_token.detach(), bert_generator_acc_dict, token_to_mask_flag, bert_generator_encoder_out_label
if self.nbest_input_num == 1:
bert_generator_encoder_out_label_right = (bert_generator_encoder_out_label == tgt_tokens_full) * source_nonpadding
bert_generator_encoder_out_label_wrong = (bert_generator_encoder_out_label != tgt_tokens_full) * source_nonpadding
else:
consist_flag = torch.ne(final_corrupt_token,
final_corrupt_token[:, :, 0:1].repeat(1, 1, self.nbest_input_num)).long().sum(-1)
consist_flag = (consist_flag == 0)
right_wrong_flag = torch.eq(final_corrupt_token, tgt_tokens_full[:, :, None].repeat(1, 1, self.nbest_input_num)).long().sum(-1)
bert_generator_encoder_out_label_right = (right_wrong_flag > 0) * source_nonpadding
bert_generator_encoder_out_label_wrong = (right_wrong_flag == 0) * source_nonpadding
bert_generator_acc_dict["bert_generator_diffright_nbest_loss"] = self.cal_acc(
(1.0 - consist_flag.long()) * bert_generator_encoder_out_label_right,
source_nonpadding.bool(),
name="BERT_Diffright_nbest-loss",
)
bert_generator_acc_dict["bert_generator_diffwrong_nbest_loss"] = self.cal_acc(
(1.0 - consist_flag.long()) * bert_generator_encoder_out_label_wrong,
source_nonpadding.bool(),
name="BERT_Diffwrong_nbest-loss",
)
bert_generator_acc_dict["bert_generator_sameright_nbest_loss"] = self.cal_acc(
consist_flag.long() * bert_generator_encoder_out_label_right,
source_nonpadding.bool(),
name="BERT_Sameright_nbest-loss",
)
bert_generator_acc_dict["bert_generator_samewrong_nbest_loss"] = self.cal_acc(
consist_flag.long() * bert_generator_encoder_out_label_wrong,
source_nonpadding.bool(),
name="BERT_Samewrong_nbest-loss",
)
bert_generator_acc_dict["bert_generator_random_nbest_loss"] = self.cal_acc(
bert_generator_encoder_out_label_right,
random_token.bool(),
name="BERT_Rand_nbest-loss",
)
bert_generator_acc_dict["bert_generator_untouch_nbest_loss"] = self.cal_acc(
bert_generator_encoder_out_label_right,
untouched_token.bool(),
name="BERT_Untouch_nbest-loss",
)
bert_generator_acc_dict["bert_generator_homophone_nbest_loss"] = self.cal_acc(
bert_generator_encoder_out_label_right,
homophone_token.bool(),
name="BERT_Homo_nbest-loss",
)
bert_generator_acc_dict["bert_generator_mask_nbest_loss"] = self.cal_acc(
bert_generator_encoder_out_label_right,
mask_token.bool(),
name="BERT_Mask_nbest-loss",
)
bert_generator_acc_dict["bert_generator_identity_nbest_loss"] = self.cal_acc(
bert_generator_encoder_out_label_right,
identity_token.bool(),
name="BERT_Id_nbest-loss",
)
label_leak_var = torch.rand_like(bert_generator_encoder_out_label_right.type_as(bert_generator_encoder_out_raw))
bert_generator_encoder_out_label_right_nonleak = bert_generator_encoder_out_label_right * (label_leak_var >= self.label_leak_prob)
bert_generator_encoder_out_label_right_leak = bert_generator_encoder_out_label_right * (label_leak_var < self.label_leak_prob)
label_ignore_var = torch.rand_like(bert_generator_encoder_out_label_wrong.type_as(bert_generator_encoder_out_raw))
bert_generator_encoder_out_label_wrong_nonignore = bert_generator_encoder_out_label_wrong * (label_ignore_var >= self.label_ignore_prob)
bert_generator_encoder_out_label_wrong_ignore = bert_generator_encoder_out_label_wrong * (label_ignore_var < self.label_ignore_prob)
bert_generator_encoder_out_label_diff_nonignore = None
bert_generator_encoder_out_label_diff_ignore = None
if self.candidate_size == -1:
return final_corrupt_token.detach(), None, None, bert_generator_acc_dict, bert_generator_encoder_out_label_wrong_ignore, \
bert_generator_encoder_out_label_wrong_nonignore, bert_generator_encoder_out_label_right_leak, bert_generator_encoder_out_label_right_nonleak, bert_generator_encoder_out_label_diff_ignore, bert_generator_encoder_out_label_diff_nonignore, bert_generator_encoder_out_label
raise ValueError("Not supported candidate_size")
def forward_mlm(self, src_tokens, src_lengths, tgt_tokens, prev_output_tokens, tgt_tokens_full, tgt_tokens_ctc, mask_type, attn_mask=None):
assert mask_type is not None
if len(src_tokens.shape) == 3:
source_nonpadding = (src_tokens[:, :, 0] != self.pad)
else:
source_nonpadding = (src_tokens != self.pad)
untouched_token = (mask_type == 0) * source_nonpadding
identity_token = (mask_type == 1) * source_nonpadding
mask_token = (mask_type == 2) * source_nonpadding
homophone_token = (mask_type == 3) * source_nonpadding
dup_token = (mask_type == 4) * source_nonpadding
random_token = (mask_type == 5) * source_nonpadding
inserted_token = (mask_type == 6) * source_nonpadding
need_detect = (untouched_token + identity_token + homophone_token + random_token).bool()
detect_label = (random_token + homophone_token).bool().long()
if self.encoder_training_type in ["bert"]:
corrupt_token_input, bert_generator_acc_dict, token_to_mask_flag, bert_generator_encoder_out_label = self.forward_bert_and_prepare_tgt(
src_tokens, src_lengths, tgt_tokens, tgt_tokens_full, tgt_tokens_ctc, mask_type, bert_generator_attn_mask=None, return_input_only=True)
elif self.encoder_training_type in ["detector"]:
corrupt_token_input, candidates, candidates_label, bert_generator_acc_dict, bert_generator_encoder_out_label_wrong_ignore, bert_generator_encoder_out_label_wrong_nonignore, \
bert_generator_encoder_out_label_right_leak, bert_generator_encoder_out_label_right_nonleak, bert_generator_encoder_out_label_diff_ignore, bert_generator_encoder_out_label_diff_nonignore, bert_generator_encoder_out_label = self.forward_bert_and_prepare_tgt(
src_tokens, src_lengths, tgt_tokens, tgt_tokens_full, tgt_tokens_ctc, mask_type, bert_generator_attn_mask=None)
token_to_mask_flag = None
else:
raise ValueError("Impossible encoder_training_type {}!".format(self.encoder_training_type))
if self.nbest_input_num != 1:
corrupt_token_input = corrupt_token_input.masked_fill(~(source_nonpadding[:, :, None].repeat(1, 1, self.nbest_input_num)), self.pad)
source_embedding = self.encoder.embed_tokens(corrupt_token_input) * source_nonpadding.long()[:, :, None, None]
else:
corrupt_token_input = corrupt_token_input.masked_fill(~source_nonpadding, self.pad)
source_embedding = self.encoder.embed_tokens(corrupt_token_input) * source_nonpadding.long()[:, :, None]
encoder_out = self.encoder(corrupt_token_input, src_lengths=src_lengths, token_embeddings=source_embedding, attn_mask=attn_mask)
encoder_out_vocab = F.linear(encoder_out.encoder_out.transpose(0, 1),
self.encoder.embed_tokens.weight) # [B, T, vocab_size]
result_dict = bert_generator_acc_dict
if self.encoder_training_type == "bert":
result_dict["mlm_loss"], _ = self._compute_nll_loss(
encoder_out_vocab,
tgt_tokens_full,
token_to_mask_flag.bool(),
self.args.label_smoothing,
name="mlm-loss",
factor=1.0,
skip_cloest=True,
return_acc=True,
log_acc_only=False,
)
mask_for_candidate = (mask_token + dup_token + homophone_token + random_token + inserted_token).long() - token_to_mask_flag.long()
mask_for_candidate = (mask_for_candidate > 0)
result_dict["class_loss"], _ = self._compute_nll_loss(
encoder_out_vocab,
tgt_tokens_full,
mask_for_candidate,
self.args.label_smoothing,
name="class-loss",
factor=1.0,
skip_cloest=True,
return_acc=True,
log_acc_only=False,
)
if self.untouch_token_loss != 0.0:
mask_for_untouch = (identity_token + untouched_token).long() - token_to_mask_flag.long()
mask_for_untouch = (mask_for_untouch > 0)
result_dict["untouch_loss"], _ = self._compute_nll_loss(
encoder_out_vocab,
tgt_tokens_full,
mask_for_untouch,
self.args.label_smoothing,
name="Untouch-loss",
factor=self.untouch_token_loss,
skip_cloest=True,
return_acc=True,
log_acc_only=False,
)
elif self.encoder_training_type in ["detector"]:
vocab_size = encoder_out_vocab.shape[-1]
encoder_out_sgtgt = (1.0 - F.one_hot(tgt_tokens_full.long(), num_classes=vocab_size).type_as(encoder_out_vocab)) * encoder_out_vocab
gttoken_labels = torch.zeros(tgt_tokens_full.shape).fill_(self.gttoken).type_as(tgt_tokens_full)
result_dict["wrong_nonignore_loss"], _ = self._compute_nll_loss(
encoder_out_vocab,
tgt_tokens_full,
bert_generator_encoder_out_label_wrong_nonignore,
self.args.label_smoothing,
name="Wrong_nonignore-loss",
factor=5.0,
skip_cloest=True,
return_acc=True,
log_acc_only=False,
)
result_dict["wrong_nonignore_nogttoken_loss"], _ = self._compute_nll_loss(
encoder_out_vocab,
tgt_tokens_full,
bert_generator_encoder_out_label_wrong_nonignore,
self.args.label_smoothing,
name="Wrong_nonignore_nogttoken-loss",
factor=1.0,
skip_cloest=True,
return_acc=True,
log_acc_only=True,
ignore_gttoken=True
)
result_dict["wrong_ignore_loss"], _ = self._compute_nll_loss(
encoder_out_sgtgt,
gttoken_labels,
bert_generator_encoder_out_label_wrong_ignore,
self.args.label_smoothing,
name="Wrong_ignore-loss",
factor=1.0,
skip_cloest=True,
return_acc=True,
log_acc_only=False,
)
assert bert_generator_encoder_out_label_diff_ignore is None
result_dict["right_leak_loss"], _ = self._compute_nll_loss(
encoder_out_vocab,
tgt_tokens_full,
bert_generator_encoder_out_label_right_leak,
self.args.label_smoothing,
name="Right_leak-loss",
factor=1.0,
skip_cloest=True,
return_acc=True,
log_acc_only=False,
)
result_dict["right_leak_nogttoken_loss"], _ = self._compute_nll_loss(
encoder_out_vocab,
tgt_tokens_full,
bert_generator_encoder_out_label_right_leak,
self.args.label_smoothing,
name="Right_leak_nogttoken-loss",
factor=1.0,
skip_cloest=True,
return_acc=True,
log_acc_only=True,
ignore_gttoken=True
)
result_dict["right_nonleak_loss"], _ = self._compute_nll_loss(
encoder_out_sgtgt,
gttoken_labels,
bert_generator_encoder_out_label_right_nonleak,
self.args.label_smoothing,
name="Right_nonleak-loss",
factor=1.0,
skip_cloest=True,
return_acc=True,
log_acc_only=False,
)
return result_dict
def forward(
self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, wer_dur=None, to_be_edited=None, for_wer_gather=None,
closest_label=None, source_phone=None, mask_type=None, target_full=None, target_ctc=None, decoder_input=None, encoder_label=None, **kwargs
):
# encoding
attn_mask = None
return self.forward_mlm(src_tokens, src_lengths, tgt_tokens, prev_output_tokens, mask_type=mask_type, tgt_tokens_full=target_full, tgt_tokens_ctc=target_ctc)
@register_model_architecture(
"softcorrect_detector", "softcorrect_detector"
)
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.apply_bert_init = getattr(args, "apply_bert_init", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
# --- special arguments ---
args.sg_length_pred = getattr(args, "sg_length_pred", False)
args.pred_length_offset = getattr(args, "pred_length_offset", False)
args.length_loss_factor = getattr(args, "length_loss_factor", 0.1)
args.src_embedding_copy = getattr(args, "src_embedding_copy", False)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
| 70,019 | 41.53949 | 289 | py |
NeuralSpeech | NeuralSpeech-master/SoftCorrect/softcorrect/softcorrect_loss.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from torch import Tensor
@register_criterion("softcorrect_loss")
class SoftcorrectCorrectorCriterion(FairseqCriterion):
def __init__(self, task, label_smoothing):
super().__init__(task)
self.label_smoothing = label_smoothing
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
parser.add_argument(
"--label-smoothing",
default=0.0,
type=float,
metavar="D",
help="epsilon for label smoothing, 0 means no label smoothing",
)
def _compute_loss(
self, outputs, targets, masks=None, label_smoothing=0.0, name="loss", factor=1.0
):
"""
outputs: batch x len x d_model
targets: batch x len
masks: batch x len
policy_logprob: if there is some policy
depends on the likelihood score as rewards.
"""
def mean_ds(x: Tensor, dim=None) -> Tensor:
return (
x.float().mean().type_as(x)
if dim is None
else x.float().mean(dim).type_as(x)
)
if masks is not None:
outputs, targets = outputs[masks], targets[masks]
if masks is not None and not masks.any():
nll_loss = torch.tensor(0)
loss = nll_loss
else:
logits = F.log_softmax(outputs, dim=-1)
if targets.dim() == 1:
losses = F.nll_loss(logits, targets.to(logits.device), reduction="none")
else: # soft-labels
losses = F.kl_div(logits, targets.to(logits.device), reduction="none")
losses = losses.sum(-1)
nll_loss = mean_ds(losses)
if label_smoothing > 0:
loss = (
nll_loss * (1 - label_smoothing) - mean_ds(logits) * label_smoothing
)
else:
loss = nll_loss
loss = loss * factor
return {"name": name, "loss": loss, "nll_loss": nll_loss, "factor": factor}
def _custom_loss(self, loss, nll_loss=None, name="loss", factor=1.0, acc=None, log_acc_only=False):
if nll_loss is not None:
return {"name": name, "loss": loss, "nll_loss": nll_loss, "factor": factor, "acc": acc, "log_acc_only": log_acc_only}
else:
return {"name": name, "loss": loss, "factor": factor, "acc": acc, "log_acc_only": log_acc_only}
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
nsentences, ntokens = sample["nsentences"], sample["ntokens"]
# B x T
src_tokens, src_lengths = (
sample["net_input"]["src_tokens"],
sample["net_input"]["src_lengths"],
)
# if "wer_dur" in sample["net_input"].keys():
wer_dur = sample["net_input"].get("wer_dur", None)
to_be_edited = sample["net_input"].get("to_be_edited", None)
for_wer_gather = sample["net_input"].get("for_wer_gather", None)
closest_label = sample["net_input"].get("closest_label", None)
source_phone = sample["net_input"].get("source_phone", None)
mask_type = sample["net_input"].get("mask_type", None)
target_full = sample["net_input"].get("target_full", None)
target_ctc = sample["net_input"].get("target_ctc", None)
tgt_tokens, prev_output_tokens = sample["target"], sample["prev_target"]
outputs = model(src_tokens, src_lengths, prev_output_tokens, tgt_tokens, wer_dur, to_be_edited, for_wer_gather, closest_label, source_phone, mask_type, target_full, target_ctc)
losses, nll_loss = [], []
for obj in outputs:
if outputs[obj].get("loss", None) is None:
_losses = self._compute_loss(
outputs[obj].get("out"),
outputs[obj].get("tgt"),
outputs[obj].get("mask", None),
outputs[obj].get("ls", 0.0),
name=obj + "-loss",
factor=outputs[obj].get("factor", 1.0),
)
else:
_losses = self._custom_loss(
outputs[obj].get("loss"),
outputs[obj].get("nll_loss", None),
name=obj + "-loss",
factor=outputs[obj].get("factor", 1.0),
acc=outputs[obj].get("acc", None),
log_acc_only=outputs[obj].get("log_acc_only", False),
)
losses += [_losses]
if outputs[obj].get("nll_loss", False):
nll_loss += [_losses.get("nll_loss", 0.0)]
loss = sum(l["loss"] for l in losses)
nll_loss = sum(l for l in nll_loss) if len(nll_loss) > 0 else loss.new_tensor(0)
# NOTE:
# we don't need to use sample_size as denominator for the gradient
# here sample_size is just used for logging
sample_size = 1
logging_output = {
"loss": loss.data,
"nll_loss": nll_loss.data,
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
for l in losses:
if "log_acc_only" not in l or not l["log_acc_only"]:
logging_output[l["name"]] = (
utils.item(l["loss"].data / l["factor"])
if reduce
else l[["loss"]].data / l["factor"]
)
if "acc" in l and l["acc"] is not None:
if l["name"][-5:] == '-loss':
acc_name = l["name"][:-5] + '_acc-loss'
else:
acc_name = l["name"] + '_acc-loss'
logging_output[acc_name] = (
utils.item(l["acc"].data * math.log(2))
)
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
loss = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
nll_loss = utils.item(sum(log.get("nll_loss", 0) for log in logging_outputs))
metrics.log_scalar(
"loss", loss / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar(
"nll_loss", nll_loss / sample_size / math.log(2), sample_size, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
)
for key in logging_outputs[0]:
if key[-5:] == "-loss":
val = sum(log.get(key, 0) for log in logging_outputs)
metrics.log_scalar(
key[:-5],
val / sample_size / math.log(2) if sample_size > 0 else 0.0,
sample_size,
round=3,
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
to True will improves distributed training speed.
"""
return True
| 7,985 | 37.956098 | 184 | py |
NeuralSpeech | NeuralSpeech-master/BinauralGrad/geowarp.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import sys
import librosa
import numpy as np
import scipy.linalg
from scipy.spatial.transform import Rotation as R
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import soundfile
from binauralgrad.warping import GeometricTimeWarper, MonotoneTimeWarper
mono_fn = sys.argv[1]
position_fn = sys.argv[2]
binaural_fn = sys.argv[3]
def load_position(position_fn):
position_list = []
with open(position_fn, 'r', encoding='utf-8') as infile:
for line in infile.readlines():
line = line.strip().split()
assert len(line) == 7
position_list.append([float(i) for i in line])
return np.array(position_list)
mono_audio, sr = librosa.load(mono_fn, mono=True, sr=None)
position_array = load_position(position_fn=position_fn)
assert len(position_array) * 400 == len(mono_audio)
assert sr == 48000
class GeometricWarper(nn.Module):
def __init__(self, sampling_rate=48000):
super().__init__()
self.warper = GeometricTimeWarper(sampling_rate=sampling_rate)
def _transmitter_mouth(self, view):
# offset between tracking markers and real mouth position in the dataset
mouth_offset = np.array([0.09, 0, -0.20])
quat = view[:, 3:, :].transpose(2, 1).contiguous().detach().cpu().view(-1, 4).numpy()
# make sure zero-padded values are set to non-zero values (else scipy raises an exception)
norms = scipy.linalg.norm(quat, axis=1)
eps_val = (norms == 0).astype(np.float32)
quat = quat + eps_val[:, None]
transmitter_rot_mat = R.from_quat(quat)
transmitter_mouth = transmitter_rot_mat.apply(mouth_offset, inverse=True)
transmitter_mouth = th.Tensor(transmitter_mouth).view(view.shape[0], -1, 3).transpose(2, 1).contiguous()
if view.is_cuda:
transmitter_mouth = transmitter_mouth.cuda()
return transmitter_mouth
def _3d_displacements(self, view):
transmitter_mouth = self._transmitter_mouth(view)
# offset between tracking markers and ears in the dataset
left_ear_offset = th.Tensor([0, -0.08, -0.22]).cuda() if view.is_cuda else th.Tensor([0, -0.08, -0.22])
right_ear_offset = th.Tensor([0, 0.08, -0.22]).cuda() if view.is_cuda else th.Tensor([0, 0.08, -0.22])
# compute displacements between transmitter mouth and receiver left/right ear
displacement_left = view[:, 0:3, :] + transmitter_mouth - left_ear_offset[None, :, None]
displacement_right = view[:, 0:3, :] + transmitter_mouth - right_ear_offset[None, :, None]
displacement = th.stack([displacement_left, displacement_right], dim=1)
return displacement
def _warpfield(self, view, seq_length):
return self.warper.displacements2warpfield(self._3d_displacements(view), seq_length)
def forward(self, mono, view):
'''
:param mono: input signal as tensor of shape B x 1 x T
:param view: rx/tx position/orientation as tensor of shape B x 7 x K (K = T / 400)
:return: warped: warped left/right ear signal as tensor of shape B x 2 x T
'''
return self.warper(th.cat([mono, mono], dim=1), self._3d_displacements(view))
geometric_warper = GeometricWarper()
dsp_result = geometric_warper(th.Tensor(mono_audio[None, None, :]), th.Tensor(position_array.transpose(1,0))[None, :, :])
soundfile.write(binaural_fn, dsp_result[0].numpy().transpose(1,0), 48000, 'PCM_16')
| 3,528 | 39.563218 | 121 | py |
NeuralSpeech | NeuralSpeech-master/BinauralGrad/metric.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import argparse
import numpy as np
import torch as th
import torchaudio as ta
from src.binauralgrad.losses import L2Loss, AmplitudeLoss, PhaseLoss
import auraloss
import speechmetrics
import sys
result_folder = sys.argv[1]
ref_folder = sys.argv[2]
window_length = 5 # seconds
scoresmetrics = speechmetrics.load(['pesq', 'stoi'], window_length)
mrstft_function = auraloss.freq.MultiResolutionSTFTLoss()
def compute_metrics(binauralized, reference, path_pt, path_gt):
'''
compute l2 error, amplitude error, and angular phase error for the given binaural and reference singal
:param binauralized: 2 x T tensor containing predicted binaural signal
:param reference: 2 x T tensor containing reference binaural signal
:return: errors as a scalar value for each metric and the number of samples in the sequence
'''
binauralized, reference = binauralized.unsqueeze(0), reference.unsqueeze(0)
# compute error metrics
l2_error = L2Loss()(binauralized, reference)
amplitude_error = AmplitudeLoss(sample_rate=48000)(binauralized, reference)
phase_error = PhaseLoss(sample_rate=48000, ignore_below=0.2)(binauralized, reference)
mrstft_error = mrstft_function(binauralized, reference)
scores = scoresmetrics(path_pt, path_gt)
pesq = scores['pesq'] if len(scores['pesq']) == 1 else scores['pesq'][0]
return{
"l2": l2_error,
"amplitude": amplitude_error,
"phase": phase_error,
"mrstft": mrstft_error,
"pesq_score": pesq,
"samples": binauralized.shape[-1]
}
# binauralized and evaluate test sequence for the eight subjects and the validation sequence
test_sequences = [f"subject{i+1}" for i in range(8)] + ["validation_sequence"]
errors = []
for test_sequence in test_sequences:
print(f"Cal {test_sequence}...")
# binauralize and save output
binaural, _ = ta.load(f"{result_folder}/{test_sequence}.wav")
# compute error metrics
reference, sr = ta.load(f"{ref_folder}/{test_sequence}/binaural.wav")
errors.append(compute_metrics(binaural, reference, f"{result_folder}/{test_sequence}.wav", f"{ref_folder}/{test_sequence}/binaural.wav"))
print(errors[-1])
# accumulate errors
sequence_weights = np.array([err["samples"] for err in errors])
sequence_weights = sequence_weights / np.sum(sequence_weights)
l2_error = sum([err["l2"] * sequence_weights[i] for i, err in enumerate(errors)])
amplitude_error = sum([err["amplitude"] * sequence_weights[i] for i, err in enumerate(errors)])
phase_error = sum([err["phase"] * sequence_weights[i] for i, err in enumerate(errors)])
mrstft_error = sum([err["mrstft"] * sequence_weights[i] for i, err in enumerate(errors)])
pesq = sum([err["pesq_score"] * sequence_weights[i] for i, err in enumerate(errors)])
# print accumulated errors on testset
print(f"l2 (x10^3): {l2_error * 1000:.3f}")
print(f"amplitude: {amplitude_error:.3f}")
print(f"phase: {phase_error:.3f}")
print(f"mrstft: {mrstft_error:.3f}")
print(f"pesq: {pesq:.3f}")
| 3,130 | 35.835294 | 141 | py |
NeuralSpeech | NeuralSpeech-master/BinauralGrad/src/binauralgrad/inference.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import numpy as np
import os
import torch
import torchaudio
import math
from argparse import ArgumentParser
from binauralgrad.params import AttrDict
import binauralgrad.params as base_params
from binauralgrad.model import BinauralGrad
models = {}
def predict(spectrogram=None, binaural_geowarp=None, tx_view=None, mono=None, binaural_type=None, model_dir=None, params=None, mean_condition=None, device=torch.device('cuda'), fast_sampling=False):
# Lazy load model.
if not model_dir in models:
if os.path.exists(f'{model_dir}/weights.pt'):
checkpoint = torch.load(f'{model_dir}/weights.pt')
else:
checkpoint = torch.load(model_dir)
model = BinauralGrad(AttrDict(params), binaural_type=binaural_type).to(device)
model.load_state_dict(checkpoint['model'])
model.eval()
models[model_dir] = model
model = models[model_dir]
model.params.override(params)
with torch.no_grad():
training_noise_schedule = np.array(model.params.noise_schedule)
inference_noise_schedule = np.array(model.params.inference_noise_schedule) if fast_sampling else training_noise_schedule
talpha = 1 - training_noise_schedule
talpha_cum = np.cumprod(talpha)
beta = inference_noise_schedule
alpha = 1 - beta
alpha_cum = np.cumprod(alpha)
T = []
for s in range(len(inference_noise_schedule)):
for t in range(len(training_noise_schedule) - 1):
if talpha_cum[t+1] <= alpha_cum[s] <= talpha_cum[t]:
twiddle = (talpha_cum[t]**0.5 - alpha_cum[s]**0.5) / (talpha_cum[t]**0.5 - talpha_cum[t+1]**0.5)
T.append(t + twiddle)
break
T = np.array(T, dtype=np.float32)
if not model.params.unconditional:
if spectrogram is not None:
if len(spectrogram.shape) == 2:# Expand rank 2 tensors by adding a batch dimension.
spectrogram = spectrogram.unsqueeze(0)
spectrogram = spectrogram.to(device)
audio = torch.randn(spectrogram.shape[0], model.params.hop_samples * spectrogram.shape[-1], device=device)
else:
audio = torch.randn(binaural_geowarp.shape[0] - (1 if getattr(model.params, "predict_mean_condition", False) else 0), binaural_geowarp.shape[1], device=device).unsqueeze(0)
binaural_geowarp = binaural_geowarp.unsqueeze(0).type_as(audio)
tx_view = tx_view.unsqueeze(0).type_as(audio)
mono = mono.unsqueeze(0).type_as(audio)
mean_condition = mean_condition.unsqueeze(0).type_as(audio)
else:
audio = torch.randn(1, params.audio_len, device=device)
noise_scale = torch.from_numpy(alpha_cum**0.5).float().unsqueeze(1).to(device)
for n in range(len(alpha) - 1, -1, -1):
c1 = 1 / alpha[n]**0.5
c2 = beta[n] / (1 - alpha_cum[n])**0.5
print(audio.shape, binaural_geowarp.shape, tx_view.shape)
if params.loss_per_layer == 0:
audio = c1 * (audio - c2 * model(audio, torch.tensor([T[n]], device=audio.device), spectrogram, geowarp=binaural_geowarp, view=tx_view, mono=mono, mean_condition=mean_condition)[0])
else:
audio = c1 * (audio - c2 * model(audio, torch.tensor([T[n]], device=audio.device), spectrogram, geowarp=binaural_geowarp, view=tx_view, mono=mono, mean_condition=mean_condition)[0])
if n > 0:
noise = torch.randn_like(audio)
sigma = ((1.0 - alpha_cum[n-1]) / (1.0 - alpha_cum[n]) * beta[n])**0.5
audio += sigma * noise
audio = torch.clamp(audio, -1.0, 1.0)
return audio[0], model.params.sample_rate
def main(args):
if args.spectrogram_path:
spectrogram = torch.from_numpy(np.load(args.spectrogram_path))
else:
spectrogram = None
if args.dsp_path:
mono, _ = torchaudio.load(f"{args.dsp_path}/mono.wav")
binaural, _ = torchaudio.load(f"{args.dsp_path}/binaural.wav")
binaural_geowarp, _ = torchaudio.load(f"{args.dsp_path}/binaural_geowarp.wav")
# receiver is fixed at origin in this dataset, so we only need transmitter view
tx_view = np.loadtxt(f"{args.dsp_path}/tx_positions.txt").transpose()
tx_view = torch.from_numpy(np.repeat(tx_view.T, 400, axis=0).T)
mean_condition_dsp = binaural_geowarp.mean(0, keepdim=True)
mean_condition_gt = binaural.mean(0, keepdim=True)
if args.use_gt_mean_condition:
mean_condition = mean_condition_gt
elif args.mean_condition_folder:
mean_condition, _ = torchaudio.load(f"{args.mean_condition_folder}/{args.output.strip('/').split('/')[-1]}")
else:
mean_condition = mean_condition_dsp
else:
binaural_geowarp = None
tx_view = None
mono = None
mean_condition = None
mean_condition_dsp = None
all_audio = []
clip_len = 2000000
for i in range(int(math.ceil(binaural_geowarp.shape[1] / clip_len))):
audio, sr = predict(spectrogram, binaural_geowarp=binaural_geowarp[ :, clip_len*i: clip_len*(i+1)], tx_view=tx_view[:, clip_len*i: clip_len*(i+1)], mono=mono[:, clip_len*i: clip_len*(i+1)],
binaural_type=getattr(args, "binaural_type", ""), model_dir=args.model_dir, fast_sampling=args.fast,
params=getattr(base_params, args.params), mean_condition=mean_condition[:, clip_len*i: clip_len*(i+1)])
if args.params in []:
if "premean" in args.params:
audio = audio.cpu() + binaural_geowarp[ :, clip_len*i: clip_len*(i+1)].mean(0, keepdim=True)
else:
audio = audio.cpu() + binaural_geowarp[ :, clip_len*i: clip_len*(i+1)]
all_audio.append(audio)
torchaudio.save(args.output, torch.cat(all_audio, axis=-1).cpu(), sample_rate=sr)
if __name__ == '__main__':
parser = ArgumentParser(description='runs inference')
parser.add_argument('model_dir',
help='directory containing a trained model (or full path to weights.pt file)')
parser.add_argument('--spectrogram_path', '-s',
help='path to a spectrogram file')
parser.add_argument('--dsp_path', '-d',
help='path to dsp folder')
parser.add_argument('--binaural_type', '-b',
help='binaural type')
parser.add_argument('--output', '-o', default='output.wav',
help='output file name')
parser.add_argument('--fast', '-f', action='store_true',
help='fast sampling procedure')
parser.add_argument('--params', default="params", type=str,
help='param set name')
parser.add_argument('--use-gt-mean-condition', action='store_true', default=False,
help='use gt stage 1')
parser.add_argument('--mean-condition-folder', default='', type=str,
help='mean condition folder')
main(parser.parse_args())
| 6,559 | 41.875817 | 198 | py |
NeuralSpeech | NeuralSpeech-master/BinauralGrad/src/binauralgrad/losses.py | """
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
# reference: https://github.com/facebookresearch/BinauralSpeechSynthesis/blob/main/src/losses.py
import numpy as np
import torch as th
import torchaudio as ta
class FourierTransform:
def __init__(self,
fft_bins=2048,
win_length_ms=40,
frame_rate_hz=100,
causal=False,
preemphasis=0.0,
sample_rate=48000,
normalized=False):
self.sample_rate = sample_rate
self.frame_rate_hz = frame_rate_hz
self.preemphasis = preemphasis
self.fft_bins = fft_bins
self.win_length = int(sample_rate * win_length_ms / 1000)
self.hop_length = int(sample_rate / frame_rate_hz)
self.causal = causal
self.normalized = normalized
if self.win_length > self.fft_bins:
print('FourierTransform Warning: fft_bins should be larger than win_length')
def _convert_format(self, data, expected_dims):
if not type(data) == th.Tensor:
data = th.Tensor(data)
if len(data.shape) < expected_dims:
data = data.unsqueeze(0)
if not len(data.shape) == expected_dims:
raise Exception(f"FourierTransform: data needs to be a Tensor with {expected_dims} dimensions but got shape {data.shape}")
return data
def _preemphasis(self, audio):
if self.preemphasis > 0:
return th.cat((audio[:, 0:1], audio[:, 1:] - self.preemphasis * audio[:, :-1]), dim=1)
return audio
def _revert_preemphasis(self, audio):
if self.preemphasis > 0:
for i in range(1, audio.shape[1]):
audio[:, i] = audio[:, i] + self.preemphasis * audio[:, i-1]
return audio
def _magphase(self, complex_stft):
mag, phase = ta.functional.magphase(complex_stft, 1.0)
return mag, phase
def stft(self, audio):
'''
wrapper around th.stft
audio: wave signal as th.Tensor
'''
hann = th.hann_window(self.win_length)
hann = hann.cuda() if audio.is_cuda else hann
spec = th.stft(audio, n_fft=self.fft_bins, hop_length=self.hop_length, win_length=self.win_length,
window=hann, center=not self.causal, normalized=self.normalized)
return spec.contiguous()
def complex_spectrogram(self, audio):
'''
audio: wave signal as th.Tensor
return: th.Tensor of size channels x frequencies x time_steps (channels x y_axis x x_axis)
'''
self._convert_format(audio, expected_dims=2)
audio = self._preemphasis(audio)
return self.stft(audio)
def magnitude_phase(self, audio):
'''
audio: wave signal as th.Tensor
return: tuple containing two th.Tensor of size channels x frequencies x time_steps for magnitude and phase spectrum
'''
stft = self.complex_spectrogram(audio)
return self._magphase(stft)
def mag_spectrogram(self, audio):
'''
audio: wave signal as th.Tensor
return: magnitude spectrum as th.Tensor of size channels x frequencies x time_steps for magnitude and phase spectrum
'''
return self.magnitude_phase(audio)[0]
def power_spectrogram(self, audio):
'''
audio: wave signal as th.Tensor
return: power spectrum as th.Tensor of size channels x frequencies x time_steps for magnitude and phase spectrum
'''
return th.pow(self.mag_spectrogram(audio), 2.0)
def phase_spectrogram(self, audio):
'''
audio: wave signal as th.Tensor
return: phase spectrum as th.Tensor of size channels x frequencies x time_steps for magnitude and phase spectrum
'''
return self.magnitude_phase(audio)[1]
def mel_spectrogram(self, audio, n_mels):
'''
audio: wave signal as th.Tensor
n_mels: number of bins used for mel scale warping
return: mel spectrogram as th.Tensor of size channels x n_mels x time_steps for magnitude and phase spectrum
'''
spec = self.power_spectrogram(audio)
mel_warping = ta.transforms.MelScale(n_mels, self.sample_rate)
return mel_warping(spec)
def complex_spec2wav(self, complex_spec, length):
'''
inverse stft
complex_spec: complex spectrum as th.Tensor of size channels x frequencies x time_steps x 2 (real part/imaginary part)
length: length of the audio to be reconstructed (in frames)
'''
complex_spec = self._convert_format(complex_spec, expected_dims=4)
hann = th.hann_window(self.win_length)
hann = hann.cuda() if complex_spec.is_cuda else hann
wav = ta.functional.istft(complex_spec, n_fft=self.fft_bins, hop_length=self.hop_length, win_length=self.win_length, window=hann, length=length, center=not self.causal)
wav = self._revert_preemphasis(wav)
return wav
def magphase2wav(self, mag_spec, phase_spec, length):
'''
reconstruction of wav signal from magnitude and phase spectrum
mag_spec: magnitude spectrum as th.Tensor of size channels x frequencies x time_steps
phase_spec: phase spectrum as th.Tensor of size channels x frequencies x time_steps
length: length of the audio to be reconstructed (in frames)
'''
mag_spec = self._convert_format(mag_spec, expected_dims=3)
phase_spec = self._convert_format(phase_spec, expected_dims=3)
complex_spec = th.stack([mag_spec * th.cos(phase_spec), mag_spec * th.sin(phase_spec)], dim=-1)
return self.complex_spec2wav(complex_spec, length)
class Loss(th.nn.Module):
def __init__(self, mask_beginning=0):
'''
base class for losses that operate on the wave signal
:param mask_beginning: (int) number of samples to mask at the beginning of the signal
'''
super().__init__()
self.mask_beginning = mask_beginning
def forward(self, data, target):
'''
:param data: predicted wave signals in a B x channels x T tensor
:param target: target wave signals in a B x channels x T tensor
:return: a scalar loss value
'''
data = data[..., self.mask_beginning:]
target = target[..., self.mask_beginning:]
return self._loss(data, target)
def _loss(self, data, target):
pass
class L2Loss(Loss):
def _loss(self, data, target):
'''
:param data: predicted wave signals in a B x channels x T tensor
:param target: target wave signals in a B x channels x T tensor
:return: a scalar loss value
'''
return th.mean((data - target).pow(2))
class AmplitudeLoss(Loss):
def __init__(self, sample_rate, mask_beginning=0):
'''
:param sample_rate: (int) sample rate of the audio signal
:param mask_beginning: (int) number of samples to mask at the beginning of the signal
'''
super().__init__(mask_beginning)
self.fft = FourierTransform(sample_rate=sample_rate)
def _transform(self, data):
return self.fft.stft(data.view(-1, data.shape[-1]))
def _loss(self, data, target):
'''
:param data: predicted wave signals in a B x channels x T tensor
:param target: target wave signals in a B x channels x T tensor
:return: a scalar loss value
'''
data, target = self._transform(data), self._transform(target)
data = th.sum(data**2, dim=-1) ** 0.5
target = th.sum(target**2, dim=-1) ** 0.5
return th.mean(th.abs(data - target))
class PhaseLoss(Loss):
def __init__(self, sample_rate, mask_beginning=0, ignore_below=0.1):
'''
:param sample_rate: (int) sample rate of the audio signal
:param mask_beginning: (int) number of samples to mask at the beginning of the signal
'''
super().__init__(mask_beginning)
self.ignore_below = ignore_below
self.fft = FourierTransform(sample_rate=sample_rate)
def _transform(self, data):
return self.fft.stft(data.reshape(-1, data.shape[-1]))
def _loss(self, data, target):
'''
:param data: predicted wave signals in a B x channels x T tensor
:param target: target wave signals in a B x channels x T tensor
:return: a scalar loss value
'''
data, target = self._transform(data).view(-1, 2), self._transform(target).view(-1, 2)
# ignore low energy components for numerical stability
target_energy = th.sum(th.abs(target), dim=-1)
pred_energy = th.sum(th.abs(data.detach()), dim=-1)
target_mask = target_energy > self.ignore_below * th.mean(target_energy)
pred_mask = pred_energy > self.ignore_below * th.mean(target_energy)
indices = th.nonzero(target_mask * pred_mask).view(-1)
data, target = th.index_select(data, 0, indices), th.index_select(target, 0, indices)
# compute actual phase loss in angular space
data_angles, target_angles = th.atan2(data[:, 0], data[:, 1]), th.atan2(target[:, 0], target[:, 1])
loss = th.abs(data_angles - target_angles)
# positive + negative values in left part of coordinate system cause angles > pi
# => 2pi -> 0, 3/4pi -> 1/2pi, ... (triangle function over [0, 2pi] with peak at pi)
loss = np.pi - th.abs(loss - np.pi)
return th.mean(loss)
| 9,657 | 39.751055 | 176 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.