repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
deep_equilibrium_inverse | deep_equilibrium_inverse-main/operators/singlecoil_mri.py | import torch, numbers, math
import torch.nn as nn
import torch.nn.functional as torchfunc
from operators.operator import LinearOperator
import numpy as np
import torch
def to_tensor(data):
    """Convert a numpy array into a PyTorch tensor.

    Complex arrays are split into real/imaginary parts stacked along a new
    trailing dimension of size 2.

    Args:
        data (np.array): Input numpy array.

    Returns:
        torch.Tensor: PyTorch version of data.
    """
    if np.iscomplexobj(data):
        parts = (data.real, data.imag)
        data = np.stack(parts, axis=-1)
    return torch.from_numpy(data)
def apply_mask(data, mask_func, seed=None, padding=None):
    """Subsample k-space data by elementwise multiplication with a mask.

    Args:
        data (torch.Tensor): Input k-space with at least 3 dims; dims -3/-2 are
            spatial and the last dim has size 2 (real/imag).
        mask_func (callable): Maps (shape, seed) to a mask broadcastable over data.
        seed (int or 1-d array_like, optional): RNG seed forwarded to mask_func.
        padding (optional): Pair (lo, hi); columns outside [lo, hi) are zeroed.

    Returns:
        tuple: (masked k-space, mask used).
    """
    # All leading batch dims collapse to 1 so the mask broadcasts over them.
    mask_shape = np.array(data.shape)
    mask_shape[:-3] = 1
    mask = mask_func(mask_shape, seed)
    if padding is not None:
        lo, hi = padding[0], padding[1]
        mask[:, :, :lo] = 0
        mask[:, :, hi:] = 0  # padding value inclusive on right of zeros
    # The + 0.0 converts any signed zeros produced by the product to +0.0
    masked_data = data * mask + 0.0
    return masked_data, mask
def mask_center(x, mask_from, mask_to):
    """Keep only columns [mask_from, mask_to) of dim 3; zero everything else."""
    out = torch.zeros_like(x)
    out[:, :, :, mask_from:mask_to] = x[:, :, :, mask_from:mask_to]
    return out
def complex_mul(x, y):
    """Elementwise complex product of two (..., 2) real/imag tensors."""
    assert x.shape[-1] == y.shape[-1] == 2
    xr, xi = x[..., 0], x[..., 1]
    yr, yi = y[..., 0], y[..., 1]
    return torch.stack((xr * yr - xi * yi, xr * yi + xi * yr), dim=-1)
def complex_conj(x):
    """Complex conjugate of a (..., 2) real/imag tensor."""
    assert x.shape[-1] == 2
    real, imag = x[..., 0], x[..., 1]
    return torch.stack((real, -imag), dim=-1)
def fft2(data):
    """Apply a centered, orthonormal 2-D FFT.

    Args:
        data (torch.Tensor): Complex-valued input with at least 3 dims;
            dims -3/-2 are spatial and dim -1 has size 2 (real/imag). All
            other dims are treated as batch dims.

    Returns:
        torch.Tensor: The FFT of the input, same layout as the input.

    Raises:
        ValueError: If the final dimension does not have size 2.
    """
    if data.shape[-1] != 2:
        raise ValueError("Tensor does not have separate complex dim.")
    # De-center, transform in complex view, then re-center.
    shifted = ifftshift(data, dim=[-3, -2])
    transformed = torch.fft.fftn(  # type: ignore
        torch.view_as_complex(shifted), dim=(-2, -1), norm="ortho"
    )
    return fftshift(torch.view_as_real(transformed), dim=[-3, -2])
def dft_matrix(N, mask):
    """Build the rows of an orthonormal N-point DFT matrix selected by `mask`.

    Only frequencies where mask[0, :] > 0 (after undoing the centered layout
    with fftshift) contribute rows; columns span all N sample positions.

    NOTE(review): `learnable_parameters` has requires_grad set, but the result
    of masked_select/exp is not registered anywhere as a Parameter here —
    confirm whether gradients through W are actually intended.
    """
    learnable_parameters = torch.arange(0, N, dtype=torch.float32)
    learnable_parameters.requires_grad_(True)
    # Undo centered (fftshifted) mask layout so indices match DFT row order.
    mask_vec = fftshift(mask[0, :], dim=0)
    mask_vec = mask_vec > 0
    mask_vec = mask_vec.squeeze()
    masked_params = torch.masked_select(learnable_parameters, mask_vec)
    # sqrt(N) normalization matches the norm="ortho" FFTs used elsewhere.
    normalizer = np.sqrt(N)
    ii, jj = torch.meshgrid(masked_params, torch.arange(0, N, dtype=torch.float32))
    # W[r, c] = exp(-2*pi*i * k_r * c / N) / sqrt(N) for each kept frequency k_r.
    W = torch.exp(-2.0 * np.pi * 1j * ii * jj / N) / normalizer
    return W
def onedfft(data, dim):
    """Intended: orthonormal 1-D FFT along `dim`, one slice at a time.

    NOTE(review): each iteration transforms torch.view_as_complex(data) — the
    WHOLE tensor — and assigns the result into a single slice, so the shapes
    only line up by broadcasting accident, and the input is mutated in place.
    This looks broken; most likely each slice was meant to be transformed
    individually. Confirm intent before using (no callers visible here).
    """
    # data = ifftshift(data, dim=dim)
    dim_size = data.shape[dim]
    for ii in range(dim_size):
        if dim == 1:
            data[:, ii, :] = torch.fft.fftn(  # type: ignore
                torch.view_as_complex(data), dim=0, norm="ortho")
        else:
            data[ii, :, :] = torch.fft.fftn(  # type: ignore
                torch.view_as_complex(data), dim=1, norm="ortho")
    # data = ifftshift(data, dim=dim)
    return data
def onedifft(data, dim):
    """Intended: orthonormal 1-D inverse FFT along `dim`, slice by slice.

    NOTE(review): same apparent bug as onedfft — the WHOLE tensor is
    transformed and written into one slice per iteration, mutating the input
    in place. Confirm intent before using (no callers visible here).
    """
    # data = ifftshift(data, dim=dim)
    dim_size = data.shape[dim]
    for ii in range(dim_size):
        if dim == 1:
            data[:, ii, :] = torch.fft.ifftn(  # type: ignore
                torch.view_as_complex(data), dim=0, norm="ortho")
        else:
            data[ii, :, :] = torch.fft.ifftn(  # type: ignore
                torch.view_as_complex(data), dim=1, norm="ortho")
    # data = ifftshift(data, dim=dim)
    return data
def ifft2(data):
    """Apply a centered, orthonormal 2-D inverse FFT.

    Args:
        data (torch.Tensor): Complex-valued input with at least 3 dims;
            dims -3/-2 are spatial and dim -1 has size 2 (real/imag).

    Returns:
        torch.Tensor: The IFFT of the input, same layout as the input.

    Raises:
        ValueError: If the final dimension does not have size 2.
    """
    if data.shape[-1] != 2:
        raise ValueError("Tensor does not have separate complex dim.")
    # De-center, inverse-transform in complex view, then re-center.
    shifted = ifftshift(data, dim=[-3, -2])
    transformed = torch.fft.ifftn(  # type: ignore
        torch.view_as_complex(shifted), dim=(-2, -1), norm="ortho"
    )
    return fftshift(torch.view_as_real(transformed), dim=[-3, -2])
def complex_abs(data):
    """Magnitude of a complex tensor stored as (..., 2) real/imag.

    Args:
        data (torch.Tensor): Tensor whose final dimension has size 2.

    Returns:
        torch.Tensor: Elementwise absolute value (last dim reduced).
    """
    assert data.size(-1) == 2
    return data.pow(2).sum(dim=-1).sqrt()
def complex_abs_sq(data):
    """Squared magnitude of a complex tensor stored as (..., 2) real/imag."""
    assert data.size(-1) == 2
    return data.pow(2).sum(dim=-1)
def root_sum_of_squares(data, dim=0):
    """Root-Sum-of-Squares (RSS) reduction along `dim`.

    Args:
        data (torch.Tensor): The input tensor.
        dim (int): Dimension reduced by the RSS transform.

    Returns:
        torch.Tensor: The RSS value.
    """
    return (data ** 2).sum(dim).sqrt()
def root_sum_of_squares_complex(data, dim=0):
    """Root-Sum-of-Squares (RSS) reduction for complex (..., 2) tensors.

    The squared magnitudes are summed along `dim` before the square root.

    Args:
        data (torch.Tensor): Input with real/imag stacked in the last dim.
        dim (int): Dimension reduced by the RSS transform.

    Returns:
        torch.Tensor: The RSS value.
    """
    assert data.size(-1) == 2
    magnitudes_sq = (data ** 2).sum(dim=-1)
    return magnitudes_sq.sum(dim).sqrt()
def center_crop(data, shape):
    """Center-crop a real image (or batch) along its last two dimensions.

    Args:
        data (torch.Tensor): At least 2-D; cropped along dims -2 and -1.
        shape (int, int): Target (height, width); each must be positive and
            no larger than the corresponding input dimension.

    Returns:
        torch.Tensor: The center-cropped image.
    """
    assert 0 < shape[0] <= data.shape[-2]
    assert 0 < shape[1] <= data.shape[-1]
    top = (data.shape[-2] - shape[0]) // 2
    left = (data.shape[-1] - shape[1]) // 2
    return data[..., top:top + shape[0], left:left + shape[1]]
def complex_center_crop(data, shape):
    """Center-crop a complex image (or batch) along dims -3 and -2.

    Args:
        data (torch.Tensor): At least 3-D; dims -3/-2 are spatial and the
            last dim has size 2 (real/imag).
        shape (int, int): Target (height, width); each must be positive and
            no larger than the corresponding input dimension.

    Returns:
        torch.Tensor: The center-cropped image.
    """
    assert 0 < shape[0] <= data.shape[-3]
    assert 0 < shape[1] <= data.shape[-2]
    top = (data.shape[-3] - shape[0]) // 2
    left = (data.shape[-2] - shape[1]) // 2
    return data[..., top:top + shape[0], left:left + shape[1], :]
def center_crop_to_smallest(x, y):
    """Center-crop both inputs to their shared (min-height, min-width)."""
    target = (min(x.shape[-2], y.shape[-2]), min(x.shape[-1], y.shape[-1]))
    return center_crop(x, target), center_crop(y, target)
def normalize(data, mean, stddev, eps=0.):
    """Standardize a tensor: (data - mean) / (stddev + eps).

    Args:
        data (torch.Tensor): Input data to be normalized.
        mean (float): Mean value subtracted from the data.
        stddev (float): Standard deviation used as the divisor.
        eps (float): Added to stddev to avoid division by zero.

    Returns:
        torch.Tensor: Normalized tensor.
    """
    centered = data - mean
    return centered / (stddev + eps)
def normalize_instance(data, eps=0.):
    """Standardize a tensor with its own mean and std.

    Computes (data - mean) / (std + eps) where both statistics come from
    `data` itself.

    Args:
        data (torch.Tensor): Input data to be normalized.
        eps (float): Added to std to avoid division by zero.

    Returns:
        tuple: (normalized tensor, mean, std).
    """
    mean = data.mean()
    std = data.std()
    normalized = (data - mean) / (std + eps)
    return normalized, mean, std
# Helper functions
def roll(x, shift, dim):
    """Circularly shift `x` by `shift` along `dim` (like np.roll for tensors).

    `shift` and `dim` may be matching sequences, in which case the shifts are
    applied one axis at a time.
    """
    if isinstance(shift, (tuple, list)):
        assert len(shift) == len(dim)
        for amount, axis in zip(shift, dim):
            x = roll(x, amount, axis)
        return x
    shift = shift % x.size(dim)
    if shift == 0:
        return x
    keep = x.size(dim) - shift
    head = x.narrow(dim, 0, keep)
    tail = x.narrow(dim, keep, shift)
    return torch.cat((tail, head), dim=dim)
def fftshift(x, dim=None):
    """Shift the zero-frequency component to the center (np.fft.fftshift)."""
    if dim is None:
        dim = tuple(range(x.dim()))
        shift = [size // 2 for size in x.shape]
    elif isinstance(dim, int):
        shift = x.shape[dim] // 2
    else:
        shift = [x.shape[axis] // 2 for axis in dim]
    return roll(x, shift, dim)
def ifftshift(x, dim=None):
    """Inverse of fftshift (np.fft.ifftshift for tensors)."""
    if dim is None:
        dim = tuple(range(x.dim()))
        shift = [(size + 1) // 2 for size in x.shape]
    elif isinstance(dim, int):
        shift = (x.shape[dim] + 1) // 2
    else:
        shift = [(x.shape[axis] + 1) // 2 for axis in dim]
    return roll(x, shift, dim)
class ApplyKSpaceMask(nn.Module):
    """Apply a fixed k-space sampling mask to an image-space tensor.

    NOTE(review): `mask` is a plain attribute, not a registered buffer, so it
    does not follow .to(device) or appear in state_dict — confirm intent.
    """
    def __init__(self, mask):
        super(ApplyKSpaceMask, self).__init__()
        self.mask = mask

    def forward(self, input):
        # image -> centered k-space
        kspace_data = fft2(ifftshift(input))
        # + 0.0 normalizes signed zeros produced by masking
        masked_kspace_data = kspace_data * self.mask + 0.0
        # masked k-space -> image
        visual_data = fftshift(ifft2(masked_kspace_data))
        return visual_data
def gaussian_oned(x):
    """Standard normal density evaluated at x."""
    coeff = 1.0 / np.sqrt(2.0 * np.pi)
    return coeff * np.exp(-(x ** 2) / 2.0)
def find_nearest(x, array):
    """Index of the element of `array` closest to the scalar `x`."""
    distances = np.abs(x - array)
    return distances.argmin()
def exhaustive_sample(center_frac, acceleration, n_cols, seed):
    """Draw a 1-D sampling pattern by rejection-sampling a standard normal.

    A central band of `round(n_cols * center_frac)` low frequencies is always
    kept; additional columns are turned on from Gaussian draws (restricted to
    [-3, 3]) until the realized acceleration drops below `acceleration`.

    Returns:
        np.ndarray: Float array of length n_cols with 1.0 at sampled columns.
    """
    grid = np.linspace(-3.0, 3.0, n_cols)
    sample_grid = np.zeros((n_cols,))
    num_low_freqs = int(round(n_cols * center_frac))
    # Fully sampled low-frequency band in the middle of the spectrum.
    pad = (n_cols - num_low_freqs + 1) // 2
    sample_grid[pad:pad + num_low_freqs] = [True] * num_low_freqs
    rng = np.random.RandomState(seed=seed)
    while True:
        draw = rng.standard_normal()
        if np.abs(draw) < 3.0:
            sample_grid[find_nearest(draw, grid)] = True
        # Stop once enough columns are on to meet the target acceleration.
        if acceleration > n_cols / sum(sample_grid):
            return sample_grid
def create_mask(shape, center_fraction, acceleration, seed=0, flipaxis=False):
    """Build a broadcastable k-space sampling mask tensor.

    Args:
        shape: Data shape; the sampled axis is shape[-2].
        center_fraction (float): Fraction of fully-kept low frequencies.
        acceleration (float): Target undersampling factor.
        seed (int): RNG seed for the random column draws.
        flipaxis (bool): If True, place the sampled axis at dim 0 instead
            of dim -2.

    Returns:
        torch.Tensor: Float mask with all singleton dims except the sampled axis.
    """
    num_cols = shape[-2]
    mask = exhaustive_sample(center_fraction, acceleration, num_cols, seed)
    # Broadcastable shape: every dim is 1 except the sampled axis.
    mask_shape = [1] * len(shape)
    if flipaxis:
        mask_shape[0] = num_cols
    else:
        mask_shape[-2] = num_cols
    mask = mask.reshape(*mask_shape).astype(np.float32)
    mask = torch.tensor(mask, requires_grad=False)
    return mask
class toKspace(nn.Module):
    """Map (B, 2, H, W) image tensors to (optionally masked) k-space."""
    def __init__(self, mask=None):
        super(toKspace, self).__init__()
        if mask is None:
            # Plain attribute so the `self.mask is not None` check still works.
            self.mask = mask
        else:
            # Registered buffer: follows .to(device) and lands in state_dict.
            self.register_buffer('mask', tensor=mask)

    def forward(self, input):
        # (B, 2, H, W) -> (B, H, W, 2) channel-last layout for the FFT helpers.
        kspace_data = fft2(ifftshift(input.permute((0, 2, 3, 1))))
        if self.mask is not None:
            # + 0.0 normalizes signed zeros produced by masking
            kspace_data = kspace_data * self.mask + 0.0
        return kspace_data.permute((0, 3, 1, 2))
class toKspaceMulti(nn.Module):
    """toKspace variant that cycles through several masks across calls."""
    def __init__(self, masks):
        super(toKspaceMulti, self).__init__()
        self.masks = masks
        # Index of the mask used by the next forward(); advanced externally.
        self.ii = 0

    def advance_ii(self):
        # NOTE(review): hard-coded modulo 3 assumes exactly three masks —
        # confirm len(self.masks) == 3 at the call sites.
        self.ii = (self.ii + 1) % 3

    def forward(self, input):
        # (B, 2, H, W) -> (B, H, W, 2) channel-last layout for the FFT helpers.
        kspace_data = fft2(ifftshift(input.permute((0, 2, 3, 1))))
        mask = self.masks[self.ii]
        # + 0.0 normalizes signed zeros produced by masking
        kspace_data = kspace_data * mask + 0.0
        return kspace_data.permute((0, 3, 1, 2))
class fromKspace(nn.Module):
    """Map (B, 2, H, W) k-space tensors back to image space, masking first if set."""
    def __init__(self, mask=None):
        super(fromKspace, self).__init__()
        if mask is None:
            # Plain attribute so the `self.mask is not None` check still works.
            self.mask = mask
        else:
            # Registered buffer: follows .to(device) and lands in state_dict.
            self.register_buffer('mask', tensor=mask)

    def forward(self, input):
        if self.mask is not None:
            # + 0.0 normalizes signed zeros produced by masking
            input = input.permute((0, 2, 3, 1)) * self.mask + 0.0
        else:
            input = input.permute((0, 2, 3, 1))
        image_data = ifftshift(ifft2(input))
        return image_data.permute((0, 3, 1, 2))
class cartesianSingleCoilMRI(LinearOperator):
def __init__(self, kspace_mask):
super(cartesianSingleCoilMRI, self).__init__()
self.register_buffer('mask', tensor=kspace_mask)
def forward(self, input):
input = ifftshift(input.permute((0, 2, 3, 1)))
complex_input = torch.view_as_complex(input)
kspace = torch.fft.fftn(complex_input, dim=1, norm="ortho")
kspace = torch.fft.fftn(kspace, dim=2, norm="ortho")
kspace = fftshift(kspace)
if self.mask is not None:
kspace_data = kspace * self.mask + 0.0
kspace_data = ifftshift(kspace_data)
return torch.view_as_real(kspace_data)
def gramian(self, input):
input = ifftshift(input.permute((0, 2, 3, 1)))
complex_input = torch.view_as_complex(input)
kspace = torch.fft.fftn(complex_input, dim=1, norm="ortho")
kspace = torch.fft.fftn(kspace, dim=2, norm="ortho")
kspace = fftshift(kspace)
if self.mask is not None:
kspace_data = kspace * self.mask + 0.0
kspace_data = ifftshift(kspace_data)
kspace_data = torch.fft.ifftn(kspace_data, dim=1, norm="ortho")
realspace = torch.fft.ifftn(kspace_data, dim=2, norm="ortho")
realspace = torch.view_as_real(realspace)
output = ifftshift(realspace).permute((0,3,1,2))
return output
def adjoint(self, input):
complex_input = torch.view_as_complex(input)
complex_input = torch.fft.ifftn(complex_input, dim=1, norm="ortho")
realspace = torch.fft.ifftn(complex_input, dim=2, norm="ortho")
realspace = torch.view_as_real(realspace)
output = ifftshift(realspace).permute((0, 3, 1, 2))
return output | 15,854 | 31.623457 | 99 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/operators/blurs.py | import numpy as np
import numbers
import math
import cv2
import torch
import torch.nn.functional as torchfunc
from operators.operator import LinearOperator
class GaussianBlur(LinearOperator):
    """Depthwise Gaussian blur as a LinearOperator.

    The Gaussian kernel is even-symmetric, so forward and adjoint are the
    same convolution.
    """
    def __init__(self, sigma, kernel_size=5, n_channels=3, n_spatial_dimensions = 2):
        super(GaussianBlur, self).__init__()
        # One conv group per channel -> channelwise (depthwise) filtering.
        self.groups = n_channels
        if isinstance(kernel_size, numbers.Number):
            self.padding = int(math.floor(kernel_size/2))  # "same"-style output size
            kernel_size = [kernel_size] * n_spatial_dimensions
        else:
            print('KERNEL SIZE MUST BE A SINGLE INTEGER - RECTANGULAR KERNELS NOT SUPPORTED AT THIS TIME')
            exit()
        # Fixed (non-trainable) Parameter so it follows .to(device)/state_dict.
        self.gaussian_kernel = torch.nn.Parameter(self.create_gaussian_kernel(sigma, kernel_size, n_channels),
                                                  requires_grad=False)

    def create_gaussian_kernel(self, sigma, kernel_size, n_channels):
        """Build a normalized Gaussian kernel shaped (n_channels, 1, k, k)."""
        kernel = 1
        meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size])
        # Product of 1-D Gaussians, one factor per spatial dimension.
        for size, mgrid in zip(kernel_size, meshgrids):
            mean = (size - 1) / 2
            kernel *= torch.exp(-((mgrid - mean) / sigma) ** 2 / 2)
        # Make sure norm of values in gaussian kernel equals 1.
        kernel = kernel / torch.sum(kernel)
        # Reshape to depthwise convolutional weight
        kernel = kernel.view(1, 1, *kernel.size())
        kernel = kernel.repeat(n_channels, *[1] * (kernel.dim() - 1))
        return kernel

    def forward(self, x):
        return torchfunc.conv2d(x, weight=self.gaussian_kernel, groups=self.groups, padding=self.padding)

    def adjoint(self, x):
        # The kernel is symmetric, so A^T coincides with A here.
        return torchfunc.conv2d(x, weight=self.gaussian_kernel, groups=self.groups, padding=self.padding)
class SingleAngleMotionBlur(LinearOperator):
    """Linear motion blur along a fixed angle as a depthwise convolution.

    NOTE(review): `adjoint` transposes the kernel, but the true adjoint of a
    cross-correlation flips the kernel in BOTH spatial dims; transpose equals
    that only for kernels symmetric under transposition — confirm.
    """
    def __init__(self, angle, kernel_size=5, n_channels=3, n_spatial_dimensions = 2):
        super(SingleAngleMotionBlur, self).__init__()
        self.groups = n_channels
        # NOTE(review): the kernel is built before kernel_size is validated
        # below, so a non-scalar kernel_size would fail here first.
        self.blur_kernel = torch.nn.Parameter(self.create_motionblur_kernel(angle, kernel_size, n_channels),
                                              requires_grad=False)
        if isinstance(kernel_size, numbers.Number):
            self.padding = int(math.floor(kernel_size/2))  # "same"-style output size
            kernel_size = [kernel_size] * n_spatial_dimensions
        else:
            print('KERNEL SIZE MUST BE A SINGLE INTEGER - RECTANGULAR KERNELS NOT SUPPORTED AT THIS TIME')
            exit()

    def create_motionblur_kernel(self, angle, kernel_size, n_channels):
        """Rotate a centered horizontal-line kernel by `angle` degrees (OpenCV)."""
        kernel = np.zeros((kernel_size, kernel_size))
        kernel[(kernel_size - 1) // 2, :] = np.ones(kernel_size, dtype=np.float32)
        kernel = cv2.warpAffine(kernel, cv2.getRotationMatrix2D((kernel_size / 2 - 0.5, kernel_size / 2 - 0.5),
                                                                angle, 1.0), (kernel_size, kernel_size))
        kernel = torch.tensor(kernel, dtype=torch.float32)
        # Make sure norm of values in gaussian kernel equals 1.
        kernel = kernel / torch.sum(kernel)
        # Reshape to a depthwise convolutional weight (n_channels, 1, k, k).
        kernel = kernel.view(1, 1, *kernel.size())
        kernel = kernel.repeat(n_channels, *[1] * (kernel.dim() - 1))
        return kernel

    def forward(self, x):
        convolution_weight = self.blur_kernel
        return torchfunc.conv2d(x, weight=convolution_weight, groups=self.groups, padding=self.padding)

    def adjoint(self, x):
        convolution_weight = torch.transpose(self.blur_kernel, dim0=2, dim1=3)
        return torchfunc.conv2d(x, weight=convolution_weight, groups=self.groups, padding=self.padding) | 3,604 | 47.716216 | 111 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/solvers/broyd_equilibrium_utils.py | import torch.nn as nn
import torch
import matplotlib
#matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
import imageio
import numpy as np
from PIL import Image
def _safe_norm(v):
if not torch.isfinite(v).all():
return np.inf
return torch.norm(v)
def scalar_search_armijo(phi, phi0, derphi0, c1=1e-4, alpha0=1, amin=0):
    """Backtracking scalar line search enforcing the Armijo condition.

    Adapted from scipy. `phi` is the 1-D objective, phi0 = phi(0) and
    derphi0 = phi'(0) (expected negative for a descent direction).

    Returns:
        (alpha, phi(alpha), n_extra_evals); alpha is None if no step above
        `amin` satisfies phi(a) <= phi0 + c1 * a * derphi0.
    """
    ite = 0
    phi_a0 = phi(alpha0)  # First do an update with step size 1
    if phi_a0 <= phi0 + c1 * alpha0 * derphi0:
        return alpha0, phi_a0, ite
    # Otherwise, compute the minimizer of a quadratic interpolant
    alpha1 = -(derphi0) * alpha0 ** 2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0)
    phi_a1 = phi(alpha1)
    # Otherwise loop with cubic interpolation until we find an alpha which
    # satisfies the first Wolfe condition (since we are backtracking, we will
    # assume that the value of alpha is not too small and satisfies the second
    # condition.
    while alpha1 > amin:  # we are assuming alpha>0 is a descent direction
        # Cubic model coefficients fit through (0, alpha0, alpha1).
        factor = alpha0 ** 2 * alpha1 ** 2 * (alpha1 - alpha0)
        a = alpha0 ** 2 * (phi_a1 - phi0 - derphi0 * alpha1) - \
            alpha1 ** 2 * (phi_a0 - phi0 - derphi0 * alpha0)
        a = a / factor
        b = -alpha0 ** 3 * (phi_a1 - phi0 - derphi0 * alpha1) + \
            alpha1 ** 3 * (phi_a0 - phi0 - derphi0 * alpha0)
        b = b / factor
        # abs() guards against tiny negative discriminants from roundoff.
        alpha2 = (-b + torch.sqrt(torch.abs(b ** 2 - 3 * a * derphi0))) / (3.0 * a)
        phi_a2 = phi(alpha2)
        ite += 1
        if (phi_a2 <= phi0 + c1 * alpha2 * derphi0):
            return alpha2, phi_a2, ite
        # Guard against steps that shrink too little per round.
        if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2 / alpha1) < 0.96:
            alpha2 = alpha1 / 2.0
        alpha0 = alpha1
        alpha1 = alpha2
        phi_a0 = phi_a1
        phi_a1 = phi_a2
    # Failed to find a suitable step length
    return None, phi_a1, ite
def line_search(update, x0, g0, g, nstep=0, on=True):
    """
    Armijo line search for the residual map g along `update` (the proposed
    direction of update). Code adapted from scipy.

    When `on` is False, or the Armijo search fails, the full step s = 1 is
    taken. Returns (x_new, g(x_new), x_new - x0, g(x_new) - g0, n_extra_evals).
    """
    # Single-element lists act as mutable closures caching the latest trial,
    # so g is not re-evaluated for a repeated step size.
    tmp_s = [0]
    tmp_g0 = [g0]
    tmp_phi = [torch.norm(g0) ** 2]
    s_norm = torch.norm(x0) / torch.norm(update)

    def phi(s, store=True):
        if s == tmp_s[0]:
            return tmp_phi[0]  # If the step size is so small... just return something
        x_est = x0 + s * update
        g0_new = g(x_est)
        phi_new = _safe_norm(g0_new) ** 2
        if store:
            tmp_s[0] = s
            tmp_g0[0] = g0_new
            tmp_phi[0] = phi_new
        return phi_new

    if on:
        s, phi1, ite = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0], amin=1e-2)
    if (not on) or s is None:
        s = 1.0
        ite = 0
    x_est = x0 + s * update
    if s == tmp_s[0]:
        # g was already evaluated at exactly this step inside phi().
        g0_new = tmp_g0[0]
    else:
        g0_new = g(x_est)
    return x_est, g0_new, x_est - x0, g0_new - g0, ite
def rmatvec(part_Us, part_VTs, x):
    """Compute x^T (-I + U V^T) for the low-rank inverse-Jacobian estimate.

    Shapes: x (N, 2d, L'), part_Us (N, 2d, L', threshold),
    part_VTs (N, threshold, 2d, L').
    """
    if part_Us.nelement() == 0:
        return -x
    # Project x onto the stored directions, then expand back through V^T.
    coeffs = torch.einsum('bij, bijd -> bd', x, part_Us)  # (N, threshold)
    return torch.einsum('bd, bdij -> bij', coeffs, part_VTs) - x
def matvec(part_Us, part_VTs, x):
    """Compute (-I + U V^T) x for the low-rank inverse-Jacobian estimate.

    Shapes: x (N, 2d, L'), part_Us (N, 2d, L', threshold),
    part_VTs (N, threshold, 2d, L').
    """
    if part_Us.nelement() == 0:
        return -x
    # Contract x with V^T, then expand the coefficients back through U.
    coeffs = torch.einsum('bdij, bij -> bd', part_VTs, x)  # (N, threshold)
    return torch.einsum('bijd, bd -> bij', part_Us, coeffs) - x
def broyden(g, x0, threshold=9, eps=1e-5, ls=False):
    """Limited-memory Broyden root finder for the residual map g.

    g takes and returns tensors of x0's shape; x0 is flattened internally to
    (batch, -1, 1). The inverse Jacobian is approximated as -I + U V^T with
    at most LBFGS_thres rank-one updates stored in cyclic slots.

    Args:
        g: Residual function whose root is sought.
        x0: Initial iterate (any shape; batch is dim 0).
        threshold: Maximum number of Broyden steps.
        eps: Target residual norm.
        ls: If True, use the Armijo line search on each step.

    Returns:
        (best_x reshaped like x0, ||g(best_x)|| as a float).
    """
    x0_shape = x0.shape
    x0 = x0.reshape((x0.shape[0], -1, 1))
    bsz, total_hsize, n_elem = x0.size()
    dev = x0.device
    x_est = x0  # (bsz, 2d, L')
    gx = g(x_est)  # (bsz, 2d, L')
    nstep = 0
    tnstep = 0
    LBFGS_thres = min(threshold, 27)  # memory cap on stored rank-one updates
    # For fast calculation of inv_jacobian (approximately)
    Us = torch.zeros(bsz, total_hsize, n_elem, LBFGS_thres).to(dev)
    VTs = torch.zeros(bsz, LBFGS_thres, total_hsize, n_elem).to(dev)
    update = gx
    new_objective = init_objective = torch.norm(gx).item()
    prot_break = False
    trace = [init_objective]
    new_trace = [-1]
    # To be used in protective breaks
    protect_thres = 1e6 * n_elem
    lowest = new_objective
    lowest_xest, lowest_gx, lowest_step = x_est, gx, nstep
    while new_objective >= eps and nstep < threshold:
        x_est, gx, delta_x, delta_gx, ite = line_search(update, x_est, gx, g, nstep=nstep, on=ls)
        nstep += 1
        tnstep += (ite + 1)
        new_objective = torch.norm(gx).item()
        trace.append(new_objective)
        try:
            new2_objective = torch.norm(delta_x).item() / (torch.norm(x_est - delta_x).item())  # Relative residual
        except:
            new2_objective = torch.norm(delta_x).item() / (torch.norm(x_est - delta_x).item() + 1e-9)
        new_trace.append(new2_objective)
        if new_objective < lowest:
            # Track the best iterate seen so far (detached copies).
            lowest_xest, lowest_gx = x_est.clone().detach(), gx.clone().detach()
            lowest = new_objective
            lowest_step = nstep
        if new_objective < eps:
            # print(nstep)
            break
        if new_objective < 3 * eps and nstep > 30 and np.max(trace[-30:]) / np.min(trace[-30:]) < 1.3:
            # if there's hardly been any progress in the last 30 steps
            # print(nstep)
            break
        if new_objective > init_objective * protect_thres:
            # Divergence guard.
            # prot_break = True
            # print(nstep)
            break
        # Rank-one Broyden update of the inverse-Jacobian factors.
        part_Us, part_VTs = Us[:, :, :, :(nstep - 1)], VTs[:, :(nstep - 1)]
        vT = rmatvec(part_Us, part_VTs, delta_x)
        u = (delta_x - matvec(part_Us, part_VTs, delta_gx)) / torch.einsum('bij, bij -> b', vT, delta_gx)[:, None, None]
        # Zero out NaNs from degenerate (near-zero denominator) updates.
        vT[vT != vT] = 0
        u[u != u] = 0
        # Cyclic slot assignment caps the stored rank at LBFGS_thres.
        VTs[:, (nstep - 1) % LBFGS_thres] = vT
        Us[:, :, :, (nstep - 1) % LBFGS_thres] = u
        update = -matvec(Us[:, :, :, :nstep], VTs[:, :nstep], gx)
    # Release the factor storage promptly.
    Us, VTs = None, None
    lowest_xest = lowest_xest.reshape(x0_shape)
    return lowest_xest, torch.norm(lowest_gx).item()
    # return {"result": lowest_xest,
    #         "nstep": nstep,
    #         "tnstep": tnstep,
    #         "lowest_step": lowest_step,
    #         "diff": torch.norm(lowest_gx).item(),
    #         "diff_detail": torch.norm(lowest_gx, dim=1),
    #         "prot_break": prot_break,
    #         "trace": trace,
    #         "new_trace": new_trace,
    #         "eps": eps,
    #         "threshold": threshold}
def L2Norm(x):
    """Per-sample squared L2 norm over dims (1, 2, 3), keeping dims for broadcasting."""
    return (x * x).sum(dim=[1, 2, 3], keepdim=True)
def epsilon2(f, x0, max_iter=50, tol=1e-2, lam=1e-4):
    """Accelerated fixed-point iteration for f via a damped vector
    Aitken/epsilon extrapolation step.

    Stops when the relative change between iterates drops below `tol` or
    after `max_iter` steps; `lam` regularizes the denominator against
    division by (near) zero. Returns (x, last_relative_residual).
    """
    x = x0
    for k in range(max_iter):
        f_x = f(x)
        delta_x = f_x - x  # first difference
        delta_f = f(f_x) - f_x  # difference one step further along
        delta2_x = delta_f - delta_x  # second difference
        # term1 = delta_f * L2Norm(delta_x)
        # term2 = delta_x * L2Norm(delta_f)
        x_new = f_x + (delta_f * L2Norm(delta_x) - delta_x * L2Norm(delta_f)) / (L2Norm(delta2_x) + lam)
        residual = (x_new - x).norm().item() / x_new.norm().item()
        x = x_new
        if (residual < tol):
            break
    return x, residual
def forward_iteration(f, x0, max_iter=50, tol=1e-5):
    """Plain fixed-point iteration x <- f(x).

    Returns the final iterate and the list of relative residuals, stopping
    early once the residual falls below `tol`.
    """
    current = f(x0)
    residuals = []
    for _ in range(max_iter):
        previous = current
        current = f(previous)
        residuals.append((current - previous).norm().item() / (1e-7 + current.norm().item()))
        if residuals[-1] < tol:
            break
    return current, residuals
def forward_iteration_plot(f, x0, max_iter=50, tol=1e-5):
    """forward_iteration variant with (currently disabled) per-step plotting.

    NOTE(review): still creates an empty figure and calls plt.show() even
    though the drawing lines are commented out — side effects on headless runs.
    """
    f0 = f(x0)
    res = []
    fig = plt.figure()
    for k in range(max_iter):
        x = f0
        f0 = f(x)
        # sub = fig.add_subplot(10,10, k)
        # plt.imshow(f0[0, : , :, :].detach().cpu().numpy())
        # plt.show()
        res.append((f0 - x).norm().item() / (1e-7 + f0.norm().item()))
        if (res[-1] < tol):
            break
    plt.show()
    return f0, res
class DEQFixedPoint(nn.Module):
    """Deep-equilibrium layer: solves z* = f(z*, x) with Broyden's method and
    differentiates implicitly via a second Broyden solve in a backward hook."""
    def __init__(self, f, **kwargs):
        super().__init__()
        self.f = f
        self.kwargs = kwargs  # expects 'max_iter' for the forward solve

    def broyd_output_test(self, z, x, y_shape, input_shape):
        """Residual g(z) = f(z, x) - z, flattened to (B, -1, 1) for broyden()."""
        reshaped_x = torch.reshape(input=x, shape=y_shape)
        reshaped_z = torch.reshape(input=z, shape=input_shape)
        output = self.f(reshaped_z, reshaped_x) - reshaped_z
        flattened = torch.reshape(output, (output.shape[0], -1)).unsqueeze(-1)
        return flattened

    def internal_g(self, z, x):
        return self.f(z, x) - z

    def forward(self, x, truth=None, initial_point=None):
        if initial_point is None:
            init_point = torch.zeros_like(x)
        else:
            init_point = initial_point
        # Bugfix: read the shape from init_point (never None); reading it from
        # initial_point raised AttributeError whenever no start point was given.
        initial_point_shape = init_point.shape
        g = lambda z: self.broyd_output_test(z, x, x.shape, initial_point_shape)
        # Solve for the fixed point without building a graph, ...
        with torch.no_grad():
            output_x, self.forward_res = broyden(g, init_point, threshold=self.kwargs['max_iter'], eps=1e-8)
        # ... then re-engage autograd with one application of f.
        z = self.f(output_x, x)
        z0 = z.clone().detach().requires_grad_()
        f0 = self.f(z0, x)

        def backward_hook(grad):
            # Implicit gradient: solve y = grad + J_f^T y with the same solver.
            def internal_function(y):
                input_shape = y.shape
                y = y.reshape(grad.shape)
                broyden_function = grad + torch.autograd.grad(f0, z0, y, retain_graph=True)[0]
                g_version = broyden_function - y
                g_version = g_version.reshape(input_shape)
                return g_version
            result = broyden(internal_function, grad, threshold=10, eps=1e-7)
            return result[0]

        z.register_hook(backward_hook)
        return z
class DEQFixedPointSimple(nn.Module):
    """DEQ layer that only runs the forward fixed-point solve; gradients flow
    through the final f application only (no implicit backward hook)."""
    def __init__(self, f, **kwargs):
        super().__init__()
        self.f = f
        self.kwargs = kwargs  # expects 'max_iter' for the forward solve

    def broyd_output_test(self, z, x, y_shape, input_shape):
        """Residual g(z) = f(z, x) - z, flattened to (B, -1, 1) for broyden()."""
        reshaped_x = torch.reshape(input=x, shape=y_shape)
        reshaped_z = torch.reshape(input=z, shape=input_shape)
        output = self.f(reshaped_z, reshaped_x) - reshaped_z
        flattened = torch.reshape(output, (output.shape[0], -1)).unsqueeze(-1)
        return flattened

    def internal_g(self, z, x):
        return self.f(z, x) - z

    def forward(self, x, truth=None, initial_point=None):
        if initial_point is None:
            init_point = torch.zeros_like(x)
        else:
            init_point = initial_point
        # Bugfix: read the shape from init_point (never None); reading it from
        # initial_point raised AttributeError whenever no start point was given.
        initial_point_shape = init_point.shape
        g = lambda z: self.broyd_output_test(z, x, x.shape, initial_point_shape)
        # Solve for the fixed point without building a graph ...
        with torch.no_grad():
            output_x, self.forward_res = broyden(g, init_point, threshold=self.kwargs['max_iter'], eps=1e-7)
        # ... then one differentiable application of f at the equilibrium.
        z = self.f(output_x, x)
        return z
class DEQFixedPoint2(nn.Module):
    """DEQ layer like DEQFixedPoint but with a fixed forward budget (100
    Broyden steps) and a solver-based backward hook."""
    def __init__(self, f, **kwargs):
        super().__init__()
        self.f = f
        self.kwargs = kwargs

    def broyd_output_test(self, z, x, y_shape, input_shape):
        """Residual g(z) = f(z, x) - z, flattened to (B, -1, 1) for broyden()."""
        reshaped_x = torch.reshape(input=x, shape=y_shape)
        reshaped_z = torch.reshape(input=z, shape=input_shape)
        output = self.f(reshaped_z, reshaped_x) - reshaped_z
        flattened = torch.reshape(output, (output.shape[0], -1)).unsqueeze(-1)
        return flattened

    def internal_g(self, z, x):
        return self.f(z, x) - z

    def forward(self, x, truth=None, initial_point=None):
        if initial_point is None:
            init_point = torch.zeros_like(x)
        else:
            init_point = initial_point
        # Bugfix: read the shape from init_point (never None); reading it from
        # initial_point raised AttributeError whenever no start point was given.
        initial_point_shape = init_point.shape
        g = lambda z: self.broyd_output_test(z, x, x.shape, initial_point_shape)
        with torch.no_grad():
            output_x, self.forward_res = broyden(g, init_point, threshold=100, eps=1e-7)
        z = self.f(output_x, x)
        z0 = z.clone().detach().requires_grad_()
        f0 = self.f(z0, x)

        def backward_hook(grad):
            # NOTE(review): self.solver is never defined on this class, so the
            # backward pass raises AttributeError — presumably broyden was
            # intended (as in DEQFixedPoint); confirm before training with this.
            g, self.backward_res = self.solver(lambda y: torch.autograd.grad(f0, z0, y, retain_graph=True)[0] + grad,
                                               grad, **self.kwargs)
            return g

        z.register_hook(backward_hook)
        return z
class DEQFixedPointTest(nn.Module):
    """Forward-only DEQ solve that returns the flattened-then-restored fixed
    point without re-engaging autograd (for evaluation/testing)."""
    def __init__(self, f, solver, **kwargs):
        super().__init__()
        self.f = f
        self.solver = solver
        self.kwargs = kwargs

    def broyd_output_test(self, z, x, y_shape, input_shape):
        """Residual g(z) = f(z, x) - z, flattened to (B, -1, 1) for broyden()."""
        reshaped_x = torch.reshape(input=x, shape=y_shape)
        reshaped_z = torch.reshape(input=z, shape=input_shape)
        output = self.f(reshaped_z, reshaped_x) - reshaped_z
        flattened = torch.reshape(output, (output.shape[0], -1)).unsqueeze(-1)
        return flattened

    def forward(self, x, truth=None, initial_point=None):
        if initial_point is None:
            init_point = torch.zeros_like(x)
        else:
            init_point = initial_point
        # Bugfix: capture the image-space shape from init_point BEFORE it is
        # flattened; the old code read initial_point.shape, which crashed
        # whenever no initial point was supplied.
        initial_point_shape = init_point.shape
        init_point = torch.reshape(init_point, (init_point.shape[0], -1, 1))
        g = lambda z: self.broyd_output_test(z, x, x.shape, initial_point_shape)
        with torch.no_grad():
            output_x, self.forward_res = broyden(g, init_point, threshold=50, eps=1e-7)
        output_x = torch.reshape(output_x, initial_point_shape)
        return output_x
def neumann_iteration(f, x0, k=10):
    """Truncated Neumann series: x0 + f(x0) + f^2(x0) + ... + f^k(x0)."""
    total = x0
    term = x0
    for _ in range(k):
        term = f(term)
        total = total + term
    return total
class DEQFixedPointNeumann(nn.Module):
    """DEQ layer whose backward pass approximates the implicit gradient with a
    truncated Neumann series instead of a second solver run."""
    def __init__(self, f, solver, neumann_k, **kwargs):
        super().__init__()
        self.f = f
        self.solver = solver
        # Number of Neumann terms used in the backward approximation.
        self.neumann_k = neumann_k
        self.kwargs = kwargs

    def forward(self, x):
        # compute forward pass and re-engage autograd tape
        with torch.no_grad():
            z, self.forward_res = self.solver(lambda z: self.f(z, x), torch.zeros_like(x), **self.kwargs)
        z = self.f(z, x)
        # set up Jacobian vector product (without additional forward calls)
        z0 = z.clone().detach().requires_grad_()
        f0 = self.f(z0, x)

        def backward_hook(grad):
            # grad + J^T grad + (J^T)^2 grad + ... truncated at neumann_k terms.
            g = neumann_iteration(lambda y: torch.autograd.grad(f0, z0, y, retain_graph=True)[0],
                                  grad, self.neumann_k)
            return g

        z.register_hook(backward_hook)
        return z
def get_equilibrium_point(solver, z, max_iterations=50, tolerance=0.001):
    """Iterate `solver` a fixed number of times from z.

    `tolerance` is currently unused (no early stopping). The final iterate is
    returned twice to match callers expecting a pair.
    """
    iterate = z
    for _ in range(max_iterations):
        iterate = solver(iterate)
    return iterate, iterate
def get_equilibrium_point_plot(solver, z, truth, max_iterations=50, tolerance=0.001):
    """Same as get_equilibrium_point, retained with (disabled) plotting hooks.

    `truth` and `tolerance` are currently unused; the per-iteration error
    visualization code has been removed from the loop body.
    """
    iterate = z
    for _ in range(max_iterations):
        iterate = solver(iterate)
    return iterate, iterate
| 17,348 | 34.478528 | 120 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/solvers/equilibrium_nets.py | import torch.nn as nn
import torch
from solvers.cg_utils import conjugate_gradient
class EquilibriumGrad(nn.Module):
    """Gradient-descent fixed-point cell for equilibrium solvers.

    One forward step computes z <- clamp(z - eta * (A^T A z - A^T y - R(z))),
    where A is the linear measurement operator and R the learned nonlinear op.
    """
    def __init__(self, linear_operator, nonlinear_operator, eta_initial_val=0.1, minval = -1, maxval = 1):
        super(EquilibriumGrad,self).__init__()
        self.linear_op = linear_operator
        self.nonlinear_op = nonlinear_operator
        # Clamp range keeps iterates inside the expected image range.
        self.minval = minval
        self.maxval = maxval
        # Check if the linear operator has parameters that can be learned:
        # if so, register them to be learned as part of the network.
        linear_param_name = 'linear_param_'
        for ii, parameter in enumerate(self.linear_op.parameters()):
            parameter_name = linear_param_name + str(ii)
            self.register_parameter(name=parameter_name, param=parameter)
        # Learnable scalar step size.
        self.register_parameter(name='eta', param=torch.nn.Parameter(torch.tensor(eta_initial_val), requires_grad=True))

    def _linear_op(self, x):
        return self.linear_op.forward(x)

    def _linear_adjoint(self, x):
        return self.linear_op.adjoint(x)

    def set_initial_point(self, y):
        # Standard warm start: A^T y.
        self.initial_point = self._linear_adjoint(y)

    def get_gradient(self, z, y):
        # Gradient of 0.5*||A z - y||^2 minus the learned regularizer direction.
        return self.linear_op.gramian(z) - self._linear_adjoint(y) - self.nonlinear_op(z)

    def forward(self, z, y):
        z_tplus1 = z - self.eta * self.get_gradient(z, y)
        z_tplus1 = torch.clamp(z_tplus1, self.minval, self.maxval)
        return z_tplus1
class PrecondNeumannNet(nn.Module):
    """Preconditioned Neumann network: sums iterates of a preconditioned
    linear step minus a learned nonlinear correction.

    Each block solves (A^T A + eta I) v = input approximately with
    ``cg_iterations`` steps of conjugate gradient.
    """

    def __init__(self, linear_operator, nonlinear_operator, lambda_initial_val=0.1, cg_iterations=10):
        super(PrecondNeumannNet, self).__init__()
        self.linear_op = linear_operator
        self.nonlinear_op = nonlinear_operator
        self.cg_iterations = cg_iterations
        # Register learnable parameters of the linear operator on this module.
        for idx, param in enumerate(self.linear_op.parameters()):
            self.register_parameter(name='linear_param_' + str(idx), param=param)
        self.register_parameter(name='eta',
                                param=torch.nn.Parameter(torch.tensor(lambda_initial_val), requires_grad=True))

    def _linear_op(self, x):
        return self.linear_op.forward(x)

    def _linear_adjoint(self, x):
        return self.linear_op.adjoint(x)

    def initial_point(self, y):
        # Approximate (A^T A + eta I)^{-1} y via CG.
        return conjugate_gradient(y, self.linear_op.gramian,
                                  regularization_lambda=self.eta, n_iterations=self.cg_iterations)

    def single_block(self, input):
        precond = conjugate_gradient(input, self.linear_op.gramian,
                                     regularization_lambda=self.eta, n_iterations=self.cg_iterations)
        return self.eta * precond - self.nonlinear_op(input)

    def forward(self, y, iterations):
        term = self.eta * self.initial_point(y)
        total = term
        for _ in range(iterations):
            term = self.single_block(term)
            total = total + term
        return total
| 3,395 | 39.915663 | 123 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/solvers/proxgrad.py | import torch.nn as nn
import torch
from solvers.cg_utils import conjugate_gradient
from PIL import Image
import imageio
import numpy as np
tt=0
class ProxgradNet(nn.Module):
    """Unrolled proximal-gradient network.

    Each block takes a gradient step on the data-fidelity term and then adds a
    learned residual correction (the "prox"). The initial iterate is a
    CG-preconditioned version of A^T y.
    """

    def __init__(self, linear_operator, nonlinear_operator, eta_initial_val=0.1):
        super(ProxgradNet, self).__init__()
        self.linear_op = linear_operator
        self.nonlinear_op = nonlinear_operator
        # Register learnable parameters of the linear operator on this module.
        for idx, param in enumerate(self.linear_op.parameters()):
            self.register_parameter(name='linear_param_' + str(idx), param=param)
        self.register_parameter(name='eta',
                                param=torch.nn.Parameter(torch.tensor(eta_initial_val), requires_grad=True))

    def _linear_op(self, x):
        return self.linear_op.forward(x)

    def _linear_adjoint(self, x):
        return self.linear_op.adjoint(x)

    def initial_point(self, y):
        return self._linear_adjoint(y)

    def initial_point_precond(self, y):
        # (A^T A + eta I)^{-1} A^T y via 60 CG iterations.
        return conjugate_gradient(self._linear_adjoint(y), self.linear_op.gramian,
                                  regularization_lambda=self.eta, n_iterations=60)

    def single_block(self, input, y):
        stepped = input - self.eta * (self.linear_op.gramian(input) - self._linear_adjoint(y))
        return self.nonlinear_op(stepped) + stepped

    def forward(self, y, iterations):
        iterate = self.initial_point_precond(y)
        for _ in range(iterations):
            iterate = self.single_block(iterate, y)
        return iterate
# tt=0
class ProxgradNetMulti(nn.Module):
    """Unrolled proximal-gradient network with a distinct learned correction
    per unrolled iteration (one nn.Module per block)."""

    def __init__(self, linear_operator, nonlinear_operators, eta_initial_val=0.1):
        super(ProxgradNetMulti, self).__init__()
        self.linear_op = linear_operator
        self.nonlinear_ops = torch.nn.ModuleList(nonlinear_operators)
        # Register learnable parameters of the linear operator on this module.
        for idx, param in enumerate(self.linear_op.parameters()):
            self.register_parameter(name='linear_param_' + str(idx), param=param)
        self.register_parameter(name='eta',
                                param=torch.nn.Parameter(torch.tensor(eta_initial_val), requires_grad=True))

    def _linear_op(self, x):
        return self.linear_op.forward(x)

    def _linear_adjoint(self, x):
        return self.linear_op.adjoint(x)

    def initial_point(self, y):
        return self._linear_adjoint(y)

    def initial_point_precond(self, y):
        # (A^T A + eta I)^{-1} A^T y via 60 CG iterations.
        return conjugate_gradient(self._linear_adjoint(y), self.linear_op.gramian,
                                  regularization_lambda=self.eta, n_iterations=60)

    def single_block(self, input, y, iterate):
        stepped = input - self.eta * (self.linear_op.gramian(input) - self._linear_adjoint(y))
        return self.nonlinear_ops[iterate](stepped) + stepped

    def forward(self, y, iterations):
        # Note: unlike ProxgradNet, the initial iterate is scaled by eta.
        iterate = self.eta * self.initial_point_precond(y)
        for block_idx in range(iterations):
            iterate = self.single_block(iterate, y, block_idx)
        return iterate
class PrecondNeumannNet(nn.Module):
    """Preconditioned Neumann network (duplicate of the version in
    equilibrium_nets.py): accumulates iterates of a CG-preconditioned linear
    step minus a learned nonlinear correction."""

    def __init__(self, linear_operator, nonlinear_operator, lambda_initial_val=0.1, cg_iterations=10):
        super(PrecondNeumannNet, self).__init__()
        self.linear_op = linear_operator
        self.nonlinear_op = nonlinear_operator
        self.cg_iterations = cg_iterations
        # Register learnable parameters of the linear operator on this module.
        for idx, param in enumerate(self.linear_op.parameters()):
            self.register_parameter(name='linear_param_' + str(idx), param=param)
        self.register_parameter(name='eta',
                                param=torch.nn.Parameter(torch.tensor(lambda_initial_val), requires_grad=True))

    def _linear_op(self, x):
        return self.linear_op.forward(x)

    def _linear_adjoint(self, x):
        return self.linear_op.adjoint(x)

    def initial_point(self, y):
        # Approximate (A^T A + eta I)^{-1} y via CG.
        return conjugate_gradient(y, self.linear_op.gramian,
                                  regularization_lambda=self.eta, n_iterations=self.cg_iterations)

    def single_block(self, input):
        precond = conjugate_gradient(input, self.linear_op.gramian,
                                     regularization_lambda=self.eta, n_iterations=self.cg_iterations)
        return self.eta * precond - self.nonlinear_op(input)

    def forward(self, y, iterations):
        term = self.eta * self.initial_point(y)
        total = term
        for _ in range(iterations):
            term = self.single_block(term)
            total = total + term
        return total
| 9,973 | 48.376238 | 134 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/solvers/gradnet.py | import torch.nn as nn
import torch
from solvers.cg_utils import conjugate_gradient
from PIL import Image
import imageio
import numpy as np
tt = 0
class GradNet(nn.Module):
    """Unrolled gradient-descent network.

    Each block performs one gradient step on the data-fidelity term minus a
    learned regularizer gradient; the initial iterate is a CG-preconditioned
    version of A^T y.
    """

    def __init__(self, linear_operator, nonlinear_operator, eta_initial_val=0.1):
        super(GradNet, self).__init__()
        self.linear_op = linear_operator
        self.nonlinear_op = nonlinear_operator
        # Register learnable parameters of the linear operator on this module.
        for idx, param in enumerate(self.linear_op.parameters()):
            self.register_parameter(name='linear_param_' + str(idx), param=param)
        self.register_parameter(name='eta',
                                param=torch.nn.Parameter(torch.tensor(eta_initial_val), requires_grad=True))

    def _linear_op(self, x):
        return self.linear_op.forward(x)

    def _linear_adjoint(self, x):
        return self.linear_op.adjoint(x)

    def initial_point(self, y):
        return self._linear_adjoint(y)

    def initial_point_precond(self, y):
        # (A^T A + eta I)^{-1} A^T y via 60 CG iterations.
        return conjugate_gradient(self._linear_adjoint(y), self.linear_op.gramian,
                                  regularization_lambda=self.eta, n_iterations=60)

    def single_block(self, input, y):
        step = self.linear_op.gramian(input) - self._linear_adjoint(y) - self.nonlinear_op(input)
        return input - self.eta * step

    def forward(self, y, iterations):
        iterate = self.initial_point_precond(y)
        for _ in range(iterations):
            iterate = self.single_block(iterate, y)
        return iterate
class PrecondNeumannNet(nn.Module):
    """Preconditioned Neumann network (duplicate of the version in
    equilibrium_nets.py): accumulates iterates of a CG-preconditioned linear
    step minus a learned nonlinear correction."""

    def __init__(self, linear_operator, nonlinear_operator, lambda_initial_val=0.1, cg_iterations=10):
        super(PrecondNeumannNet, self).__init__()
        self.linear_op = linear_operator
        self.nonlinear_op = nonlinear_operator
        self.cg_iterations = cg_iterations
        # Register learnable parameters of the linear operator on this module.
        for idx, param in enumerate(self.linear_op.parameters()):
            self.register_parameter(name='linear_param_' + str(idx), param=param)
        self.register_parameter(name='eta',
                                param=torch.nn.Parameter(torch.tensor(lambda_initial_val), requires_grad=True))

    def _linear_op(self, x):
        return self.linear_op.forward(x)

    def _linear_adjoint(self, x):
        return self.linear_op.adjoint(x)

    def initial_point(self, y):
        # Approximate (A^T A + eta I)^{-1} y via CG.
        return conjugate_gradient(y, self.linear_op.gramian,
                                  regularization_lambda=self.eta, n_iterations=self.cg_iterations)

    def single_block(self, input):
        precond = conjugate_gradient(input, self.linear_op.gramian,
                                     regularization_lambda=self.eta, n_iterations=self.cg_iterations)
        return self.eta * precond - self.nonlinear_op(input)

    def forward(self, y, iterations):
        term = self.eta * self.initial_point(y)
        total = term
        for _ in range(iterations):
            term = self.single_block(term)
            total = total + term
        return total
| 6,039 | 45.10687 | 135 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/solvers/new_equilibrium_utils.py | import torch.nn as nn
import torch
import matplotlib
#matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
import imageio
import numpy as np
from PIL import Image
def complex_conj(x):
    """Complex conjugate of a 2-channel tensor (dim 1 holds [real, imag])."""
    assert x.shape[1] == 2
    real, imag = x[:, 0, ...], x[:, 1, ...]
    return torch.stack((real, -imag), dim=1)
def torchdotproduct(x, y):
    """Per-batch-element dot product: elementwise product summed over all
    non-batch dimensions; returns a tensor of shape (batch,)."""
    return (x * y).sum(dim=[1, 2, 3])
def single_cg_iteration(x, d, g, b, ATA, regularization_lambda):
    """One conjugate-gradient step on the regularized normal equations
    (ATA + lambda I) x = b.

    Args:
        x: current solution iterate.
        d: current search direction.
        g: current gradient (residual with flipped sign convention).
        b: right-hand side.
        ATA: callable applying the gramian A^T A.
        regularization_lambda: Tikhonov regularization weight.

    Returns:
        Updated (x, d, g).
    """
    def apply_reg(v):
        return ATA(v) + regularization_lambda * v

    Qd = apply_reg(d)
    dQd = torch.sum(d * Qd, dim=[1, 2, 3])
    step = (-torch.sum(g * d, dim=[1, 2, 3]) / dQd).view((-1, 1, 1, 1))
    x = x + step * d
    g = apply_reg(x) - b
    mix = (torch.sum(g * Qd, dim=[1, 2, 3]) / dQd).view((-1, 1, 1, 1))
    d = -g + mix * d
    return x, d, g
def conjugate_gradient(initial_point, ATA, regularization_lambda, n_iterations=10):
    """Run ``n_iterations`` CG steps to solve (ATA + lambda I) x = initial_point,
    where ``initial_point`` is expected to be A^T y. Differentiable end-to-end."""
    x = torch.zeros_like(initial_point)
    d = initial_point
    g = -initial_point
    for _ in range(n_iterations):
        x, d, g = single_cg_iteration(x, d, g, initial_point, ATA, regularization_lambda)
    return x
def complex_dotproduct(x, y):
    """Batchwise dot product of 2-channel tensors with the first argument
    conjugated (see complex_conj); returns shape (batch,)."""
    conj_x = complex_conj(x)
    return torchdotproduct(conj_x, y)
def single_cg_iteration_MRI(rTr, x, r, p, ATA, regularization_lambda):
    """One CG step for (ATA + lambda I) x = b on 2-channel (real/imag) data,
    following the MoDL-style recursion.

    Args:
        rTr: squared residual norm carried from the previous iteration, (B,...)
        x: current solution iterate.
        r: current residual.
        p: current search direction.
        ATA: callable applying the gramian A^T A.
        regularization_lambda: Tikhonov regularization weight.

    Returns:
        (rTr_new, x_new, r_new, p_new).

    Bug fix: the next search direction must be built from the *updated*
    residual, p_{k+1} = r_{k+1} + beta * p_k. The previous code used the stale
    residual r_k, which destroys the conjugacy of the search directions.
    """
    batch_size = x.shape[0]

    def regATA(input):
        return ATA(input) + regularization_lambda * input

    Ap = regATA(p)
    rTr = rTr.view(batch_size, 1, 1, 1)
    alpha = rTr / complex_dotproduct(p, Ap).view(batch_size, 1, 1, 1)
    x_new = x + alpha * p
    r_new = r - alpha * Ap
    rTr_new = complex_dotproduct(r_new, r_new).view(batch_size, 1, 1, 1)
    beta = rTr_new / rTr
    # NOTE(review): complex_dotproduct conjugates x and sums x_re*y_re - x_im*y_im,
    # which only behaves as a true inner product for real-valued data — verify
    # against the intended Re(conj(x) * y).
    p_new = r_new + beta * p
    return rTr_new, x_new, r_new, p_new
def conjugate_gradient_MRI(initial_point, ATA, regularization_lambda, n_iterations=10):
    """CG solve of (ATA + lambda I) x = initial_point for 2-channel complex
    data (straightforward port of the MoDL implementation)."""
    x = torch.zeros_like(initial_point)
    r = initial_point
    p = initial_point
    rTr = complex_dotproduct(r, r)
    for _ in range(n_iterations):
        rTr, x, r, p = single_cg_iteration_MRI(rTr, x, r, p, ATA, regularization_lambda)
    return x
def jacobian_vector_product(g, z, v):
    """Vector-Jacobian product J^T v of ``g`` with respect to ``z`` via autograd."""
    return torch.autograd.grad(outputs=g, inputs=z, grad_outputs=v)[0]
def conjugate_gradient_equilibriumgrad(b, input_z, f_function, n_iterations=10):
    """CG-style iteration for the equilibrium backward pass.

    Approximately solves a linear system whose matrix is the (damped) transposed
    Jacobian of g(z) = f_function(z) - z at ``input_z``; Jacobian-transpose
    products are obtained with torch.autograd.grad on g.

    NOTE(review): several details deviate from textbook CG and should be
    verified against the intended derivation:
      * x_k is initialized to b while r_k is also set to b, which is only the
        correct residual for an initial iterate of zero;
      * the matrix-vector product uses grad_outputs=x_k rather than the search
        direction p_k;
      * beta = rTr_k / rTr_kplus1 is the reciprocal of the standard
        Fletcher-Reeves ratio (new over old).
    """
    initial_guess = b.clone()
    x_k = initial_guess
    r_k = b
    p_k = r_k
    batch_size = b.shape[0]
    # g(z) = f(z) - z; differentiating g w.r.t. input_z yields J^T products.
    g = f_function(input_z) - input_z
    for ii in range(n_iterations):
        # Damped Jacobian-transpose product: J^T x_k + 1e-5 * x_k (Tikhonov-style damping).
        Ap_k = (torch.autograd.grad(outputs=g, inputs=input_z, grad_outputs=x_k, retain_graph=True)[0] + 0.00001 * x_k)
        rTr_k = torchdotproduct(r_k, r_k)
        rTr_k = rTr_k.view(batch_size, 1, 1, 1)
        pAp_k = torchdotproduct(Ap_k, p_k)
        pAp_k = pAp_k.view(batch_size, 1, 1, 1)
        alpha = rTr_k / pAp_k
        x_k = x_k + alpha * p_k
        r_kplus1 = r_k - alpha * Ap_k
        rTr_kplus1 = torchdotproduct(r_kplus1, r_kplus1)
        rTr_kplus1 = rTr_kplus1.view(batch_size, 1, 1, 1)
        beta = rTr_k / rTr_kplus1
        p_k = r_kplus1 + beta * p_k
        r_k = r_kplus1
    return x_k
#tt= 0
def anderson(f, x0, m=5, lam=1e-4, max_iter=50, tol=1e-2, beta=1.0):
    """Anderson acceleration for the fixed-point iteration z = f(z).

    Adapted from the Deep Equilibrium tutorial:
    http://implicit-layers-tutorial.org/deep_equilibrium_models/

    Fixes: ``torch.solve`` (deprecated in 1.9, removed in 1.13) is replaced by
    ``torch.linalg.solve``; dead locals (current_iterate, past_iterate) removed;
    the coefficient matrix no longer shadows the spatial dimension name H.

    Args:
        f: callable mapping a (bsz, d, H, W) tensor to one of the same shape.
        x0: initial iterate of shape (bsz, d, H, W).
        m: history length (number of past iterates mixed).
        lam: Tikhonov regularization added to the normal equations.
        max_iter: maximum number of iterations.
        tol: relative-residual stopping tolerance.
        beta: mixing (damping) parameter; 1.0 means no damping.

    Returns:
        (x, res): final iterate shaped like x0, and the list of relative residuals.
    """
    bsz, d, h, w = x0.shape
    dim = d * h * w
    X = torch.zeros(bsz, m, dim, dtype=x0.dtype, device=x0.device)
    F = torch.zeros(bsz, m, dim, dtype=x0.dtype, device=x0.device)
    X[:, 0], F[:, 0] = x0.reshape(bsz, -1), f(x0).reshape(bsz, -1)
    X[:, 1], F[:, 1] = F[:, 0], f(F[:, 0].reshape(x0.shape)).reshape(bsz, -1)
    # Bordered system enforcing sum(alpha) = 1 on the mixing coefficients.
    Hmat = torch.zeros(bsz, m + 1, m + 1, dtype=x0.dtype, device=x0.device)
    Hmat[:, 0, 1:] = Hmat[:, 1:, 0] = 1
    y = torch.zeros(bsz, m + 1, 1, dtype=x0.dtype, device=x0.device)
    y[:, 0] = 1
    res = []
    current_k = 0
    for k in range(2, max_iter):
        current_k = k
        n = min(k, m)
        G = F[:, :n] - X[:, :n]
        Hmat[:, 1:n + 1, 1:n + 1] = torch.bmm(G, G.transpose(1, 2)) \
            + lam * torch.eye(n, dtype=x0.dtype, device=x0.device)[None]
        # torch.linalg.solve(A, B) solves A X = B (replacement for torch.solve(B, A)).
        alpha = torch.linalg.solve(Hmat[:, :n + 1, :n + 1], y[:, :n + 1])[:, 1:n + 1, 0]  # (bsz x n)
        X[:, k % m] = beta * (alpha[:, None] @ F[:, :n])[:, 0] + (1 - beta) * (alpha[:, None] @ X[:, :n])[:, 0]
        F[:, k % m] = f(X[:, k % m].reshape(x0.shape)).reshape(bsz, -1)
        res.append((F[:, k % m] - X[:, k % m]).norm().item() / (1e-5 + F[:, k % m].norm().item()))
        if res[-1] < tol:
            break
    return X[:, current_k % m].view_as(x0), res
def andersonexp(f, x0, m=5, lam=1e-4, max_iter=50, tol=1e-2, beta=1.0):
    """Anderson acceleration variant that returns only the last relative
    residual (a float) instead of the whole residual history.

    Fix: ``torch.solve`` (deprecated in 1.9, removed in 1.13) is replaced by
    ``torch.linalg.solve``; the coefficient matrix no longer shadows the
    spatial dimension name H.

    Returns:
        (x, res): final iterate shaped like x0, and the last relative residual.
    """
    bsz, d, h, w = x0.shape
    dim = d * h * w
    X = torch.zeros(bsz, m, dim, dtype=x0.dtype, device=x0.device)
    F = torch.zeros(bsz, m, dim, dtype=x0.dtype, device=x0.device)
    X[:, 0], F[:, 0] = x0.reshape(bsz, -1), f(x0).reshape(bsz, -1)
    X[:, 1], F[:, 1] = F[:, 0], f(F[:, 0].reshape(x0.shape)).reshape(bsz, -1)
    # Bordered system enforcing sum(alpha) = 1 on the mixing coefficients.
    Hmat = torch.zeros(bsz, m + 1, m + 1, dtype=x0.dtype, device=x0.device)
    Hmat[:, 0, 1:] = Hmat[:, 1:, 0] = 1
    y = torch.zeros(bsz, m + 1, 1, dtype=x0.dtype, device=x0.device)
    y[:, 0] = 1
    current_k = 0
    for k in range(2, max_iter):
        current_k = k
        n = min(k, m)
        G = F[:, :n] - X[:, :n]
        Hmat[:, 1:n + 1, 1:n + 1] = torch.bmm(G, G.transpose(1, 2)) \
            + lam * torch.eye(n, dtype=x0.dtype, device=x0.device)[None]
        # torch.linalg.solve(A, B) solves A X = B (replacement for torch.solve(B, A)).
        alpha = torch.linalg.solve(Hmat[:, :n + 1, :n + 1], y[:, :n + 1])[:, 1:n + 1, 0]  # (bsz x n)
        X[:, k % m] = beta * (alpha[:, None] @ F[:, :n])[:, 0] + (1 - beta) * (alpha[:, None] @ X[:, :n])[:, 0]
        F[:, k % m] = f(X[:, k % m].reshape(x0.shape)).reshape(bsz, -1)
        res = (F[:, k % m] - X[:, k % m]).norm().item() / (1e-5 + F[:, k % m].norm().item())
        if res < tol:
            break
    return X[:, current_k % m].view_as(x0), res
def L2Norm(x):
    """Per-sample *squared* L2 norm, keeping dims so the result (B,1,1,1)
    broadcasts against the input."""
    return torch.sum(x * x, dim=[1, 2, 3], keepdim=True)
def epsilon2(f, x0, max_iter=50, tol=1e-2, lam=1e-4):
    """Vector epsilon (Aitken-style) extrapolation toward the fixed point of f.

    Each step forms first and second differences of the iterates and
    extrapolates, with ``lam`` regularizing the denominator. Stops once the
    relative update falls below ``tol``.

    Returns:
        (x, residual): final iterate and last relative update size.
    """
    def sq_norm(v):
        # Per-sample squared L2 norm, shape (B, 1, 1, 1) for broadcasting.
        return torch.sum(v * v, dim=[1, 2, 3], keepdim=True)

    x = x0
    for _ in range(max_iter):
        fx = f(x)
        dx = fx - x
        df = f(fx) - fx
        d2 = df - dx
        x_next = fx + (df * sq_norm(dx) - dx * sq_norm(df)) / (sq_norm(d2) + lam)
        residual = (x_next - x).norm().item() / x_next.norm().item()
        x = x_next
        if residual < tol:
            break
    return x, residual
def forward_iteration(f, x0, max_iter=50, tol=1e-5):
    """Plain fixed-point iteration z_{k+1} = f(z_k) with relative-residual
    stopping; returns (z, residual_history)."""
    z = f(x0)
    history = []
    for _ in range(max_iter):
        prev = z
        z = f(prev)
        history.append((z - prev).norm().item() / (1e-7 + z.norm().item()))
        if history[-1] < tol:
            break
    return z, history
def forward_iteration_plot(f, x0, max_iter=50, tol=1e-5):
    """Fixed-point iteration identical to forward_iteration, but also opens a
    matplotlib figure and calls plt.show() at the end (the per-iteration
    plotting itself is currently disabled)."""
    z = f(x0)
    history = []
    fig = plt.figure()  # kept for parity with the original side effect
    for _ in range(max_iter):
        prev = z
        z = f(prev)
        history.append((z - prev).norm().item() / (1e-7 + z.norm().item()))
        if history[-1] < tol:
            break
    plt.show()
    return z, history
class DEQFixedPoint(nn.Module):
    """Deep equilibrium layer.

    Forward: finds z* = f(z*, x) with a black-box fixed-point ``solver`` run
    under torch.no_grad(), then applies f once more so the output is attached
    to the autograd graph.
    Backward: a tensor hook reuses the same solver on the implicit-function
    linear system y = J_f^T y + grad, so gradients flow through the
    equilibrium without backpropagating through the solver iterations.
    """
    def __init__(self, f, solver, **kwargs):
        super().__init__()
        self.f = f            # fixed-point map f(z, x)
        self.solver = solver  # solver(g, z0, **kwargs) -> (fixed point, residual info)
        self.kwargs = kwargs  # extra options forwarded to the solver (tol, max_iter, ...)

    def forward(self, x, initial_point = None):
        if initial_point is None:
            init_point = torch.zeros_like(x)
        else:
            init_point = initial_point
        # compute forward pass and re-engage autograd tape
        with torch.no_grad():
            z, self.forward_res = self.solver(lambda z: self.f(z, x), init_point, **self.kwargs)
        z = self.f(z, x)

        # set up Jacobian vector product (without additional forward calls)
        z0 = z.clone().detach().requires_grad_()
        f0 = self.f(z0, x)

        def backward_hook(grad):
            # Solve y = J^T y + grad for y with the same fixed-point solver.
            g, self.backward_res = self.solver(lambda y: torch.autograd.grad(f0, z0, y, retain_graph=True)[0] + grad,
                                               grad, **self.kwargs)
            return g

        z.register_hook(backward_hook)
        return z
class DEQFixedPointExp(nn.Module):
    """Deep equilibrium layer (identical behavior to DEQFixedPoint).

    The forward pass locates z* = f(z*, x) with a black-box solver under
    no_grad and re-attaches the result to the autograd graph with one extra f
    application; the backward pass solves the implicit-function linear system
    through a tensor hook using the same solver.
    """

    def __init__(self, f, solver, **kwargs):
        super().__init__()
        self.f = f
        self.solver = solver
        self.kwargs = kwargs

    def forward(self, x, initial_point=None):
        start = torch.zeros_like(x) if initial_point is None else initial_point
        # Fixed point is found without gradient tracking ...
        with torch.no_grad():
            z, self.forward_res = self.solver(lambda u: self.f(u, x), start, **self.kwargs)
        # ... then one tracked application re-engages the autograd tape.
        z = self.f(z, x)

        # Prepare Jacobian-vector products without extra forward solves.
        z0 = z.clone().detach().requires_grad_()
        f0 = self.f(z0, x)

        def backward_hook(grad):
            # Solve y = J^T y + grad with the same fixed-point solver.
            g, self.backward_res = self.solver(
                lambda v: torch.autograd.grad(f0, z0, v, retain_graph=True)[0] + grad,
                grad, **self.kwargs)
            return g

        z.register_hook(backward_hook)
        return z
class DEQFixedPointTest(nn.Module):
    """Inference-only DEQ layer: runs the fixed-point solver under no_grad and
    returns the equilibrium without re-attaching it to the autograd graph.
    ``truth`` is accepted for interface compatibility but unused."""

    def __init__(self, f, solver, **kwargs):
        super().__init__()
        self.f = f
        self.solver = solver
        self.kwargs = kwargs

    def forward(self, x, truth=None, initial_point=None):
        start = torch.zeros_like(x) if initial_point is None else initial_point
        with torch.no_grad():
            z, self.forward_res = self.solver(lambda u: self.f(u, x), start, **self.kwargs)
        return z
def neumann_iteration(f, x0, k=10):
    """Truncated Neumann series: x0 + f(x0) + f(f(x0)) + ... (k applications)."""
    total = x0
    term = x0
    for _ in range(k):
        term = f(term)
        total = total + term
    return total
class DEQFixedPointNeumann(nn.Module):
    """DEQ layer whose backward pass uses a truncated Neumann series.

    Forward: run ``solver`` under no_grad to find z* = f(z*, x), then apply f
    once more so the output joins the autograd graph.
    Backward: approximate (I - J^T)^{-1} grad with the truncated series
    grad + J^T grad + (J^T)^2 grad + ... (``neumann_k`` terms, via
    neumann_iteration) instead of calling the fixed-point solver.
    """
    def __init__(self, f, solver, neumann_k, **kwargs):
        super().__init__()
        self.f = f                  # fixed-point map f(z, x)
        self.solver = solver        # black-box fixed-point solver (forward pass only)
        self.neumann_k = neumann_k  # number of Neumann-series terms in the backward pass
        self.kwargs = kwargs

    def forward(self, x):
        # compute forward pass and re-engage autograd tape
        with torch.no_grad():
            z, self.forward_res = self.solver(lambda z: self.f(z, x), torch.zeros_like(x), **self.kwargs)
        z = self.f(z, x)

        # set up Jacobian vector product (without additional forward calls)
        z0 = z.clone().detach().requires_grad_()
        f0 = self.f(z0, x)

        def backward_hook(grad):
            # Accumulate grad + J^T grad + (J^T)^2 grad + ...
            g = neumann_iteration(lambda y: torch.autograd.grad(f0, z0, y, retain_graph=True)[0],
                                  grad, self.neumann_k)
            return g

        z.register_hook(backward_hook)
        return z
def get_equilibrium_point(solver, z, max_iterations=50, tolerance=0.001):
    """Iterate ``solver`` from ``z`` toward a fixed point.

    Stops early once the relative update
    ``||z_{k+1} - z_k|| / (1e-5 + ||z_{k+1}||)`` drops below ``tolerance``.

    Fix: the stopping test previously compared against a hard-coded 1e-3,
    silently ignoring the ``tolerance`` parameter.

    Returns:
        (z_star, z_star): the final iterate, duplicated to preserve the
        original two-value return signature.
    """
    old_iterate = z
    for _ in range(max_iterations):
        new_iterate = solver(old_iterate)
        res = (new_iterate - old_iterate).norm().item() / (1e-5 + new_iterate.norm().item())
        old_iterate = new_iterate
        if res < tolerance:
            break
    return new_iterate, new_iterate
def get_equilibrium_point_plot(solver, z, truth, max_iterations=50, tolerance=0.001):
    """Apply ``solver`` exactly ``max_iterations`` times starting from ``z``.

    ``truth`` and ``tolerance`` are accepted for interface compatibility but
    unused (the per-iteration plotting this variant once carried was disabled).

    Returns:
        (z_final, z_final): the last iterate, duplicated to match the
        two-value return convention of get_equilibrium_point.
    """
    iterate = z
    for _ in range(max_iterations):
        iterate = solver(iterate)
    return iterate, iterate
| 12,873 | 33.239362 | 120 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/solvers/cg_utils.py | import torch.nn as nn
import torch
def complex_conj(x):
    """Complex conjugate of a 2-channel tensor (dim 1 holds [real, imag])."""
    assert x.shape[1] == 2
    real, imag = x[:, 0, ...], x[:, 1, ...]
    return torch.stack((real, -imag), dim=1)
def torchdotproduct(x, y):
    """Per-batch-element dot product: elementwise product summed over all
    non-batch dimensions; returns a tensor of shape (batch,)."""
    return (x * y).sum(dim=[1, 2, 3])
def single_cg_iteration(x, d, g, b, ATA, regularization_lambda):
    """One conjugate-gradient step on the regularized normal equations
    (ATA + lambda I) x = b.

    Returns the updated (x, d, g): solution iterate, search direction, and
    gradient.
    """
    def apply_reg(v):
        return ATA(v) + regularization_lambda * v

    Qd = apply_reg(d)
    dQd = torch.sum(d * Qd, dim=[1, 2, 3])
    step = (-torch.sum(g * d, dim=[1, 2, 3]) / dQd).view((-1, 1, 1, 1))
    x = x + step * d
    g = apply_reg(x) - b
    mix = (torch.sum(g * Qd, dim=[1, 2, 3]) / dQd).view((-1, 1, 1, 1))
    d = -g + mix * d
    return x, d, g
def conjugate_gradient(initial_point, ATA, regularization_lambda, n_iterations=10):
    """Run ``n_iterations`` CG steps to solve (ATA + lambda I) x = initial_point,
    where ``initial_point`` is expected to be A^T y. Differentiable end-to-end."""
    x = torch.zeros_like(initial_point)
    d = initial_point
    g = -initial_point
    for _ in range(n_iterations):
        x, d, g = single_cg_iteration(x, d, g, initial_point, ATA, regularization_lambda)
    return x
def complex_dotproduct(x, y):
    """Batchwise dot product of 2-channel tensors with the first argument
    conjugated (see complex_conj); returns shape (batch,)."""
    conj_x = complex_conj(x)
    return torchdotproduct(conj_x, y)
def single_cg_iteration_MRI(rTr, x, r, p, ATA, regularization_lambda):
    """One CG step for (ATA + lambda I) x = b on 2-channel (real/imag) data,
    following the MoDL-style recursion.

    Args:
        rTr: squared residual norm carried from the previous iteration.
        x, r, p: current solution iterate, residual, and search direction.
        ATA: callable applying the gramian A^T A.
        regularization_lambda: Tikhonov regularization weight.

    Returns:
        (rTr_new, x_new, r_new, p_new).

    Bug fix: the next search direction must be built from the *updated*
    residual, p_{k+1} = r_{k+1} + beta * p_k. The previous code used the stale
    residual r_k, which destroys the conjugacy of the search directions.
    """
    batch_size = x.shape[0]

    def regATA(input):
        return ATA(input) + regularization_lambda * input

    Ap = regATA(p)
    rTr = rTr.view(batch_size, 1, 1, 1)
    alpha = rTr / complex_dotproduct(p, Ap).view(batch_size, 1, 1, 1)
    x_new = x + alpha * p
    r_new = r - alpha * Ap
    rTr_new = complex_dotproduct(r_new, r_new).view(batch_size, 1, 1, 1)
    beta = rTr_new / rTr
    # NOTE(review): complex_dotproduct conjugates x and sums x_re*y_re - x_im*y_im,
    # which only behaves as a true inner product for real-valued data — verify
    # against the intended Re(conj(x) * y).
    p_new = r_new + beta * p
    return rTr_new, x_new, r_new, p_new
def conjugate_gradient_MRI(initial_point, ATA, regularization_lambda, n_iterations=10):
    """CG solve of (ATA + lambda I) x = initial_point for 2-channel complex
    data (straightforward port of the MoDL implementation)."""
    x = torch.zeros_like(initial_point)
    r = initial_point
    p = initial_point
    rTr = complex_dotproduct(r, r)
    for _ in range(n_iterations):
        rTr, x, r, p = single_cg_iteration_MRI(rTr, x, r, p, ATA, regularization_lambda)
    return x
deep_equilibrium_inverse | deep_equilibrium_inverse-main/solvers/equilibrium_solvers.py | import torch.nn as nn
import torch
import matplotlib
# matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
from solvers.cg_utils import conjugate_gradient
class EquilibriumGrad(nn.Module):
    """Gradient-step fixed-point map: data-fidelity gradient minus a learned
    regularizer gradient, followed by a clamp to [minval, maxval]. The step
    size ``eta`` is a learnable scalar parameter."""

    def __init__(self, linear_operator, nonlinear_operator, eta, minval=-1, maxval=1):
        super(EquilibriumGrad, self).__init__()
        self.linear_op = linear_operator
        self.nonlinear_op = nonlinear_operator
        self.minval = minval
        self.maxval = maxval
        # Surface any learnable parameters of the linear operator on this module.
        for idx, param in enumerate(self.linear_op.parameters()):
            self.register_parameter(name='linear_param_' + str(idx), param=param)
        self.register_parameter(name='eta',
                                param=torch.nn.Parameter(torch.tensor(eta), requires_grad=True))

    def _linear_op(self, x):
        return self.linear_op.forward(x)

    def _linear_adjoint(self, x):
        return self.linear_op.adjoint(x)

    def set_initial_point(self, y):
        # Cache A^T y as a starting iterate.
        self.initial_point = self._linear_adjoint(y)

    def get_gradient(self, z, y):
        return self.linear_op.gramian(z) - self._linear_adjoint(y) - self.nonlinear_op(z)

    def forward(self, z, y):
        stepped = z - self.eta * self.get_gradient(z, y)
        return torch.clamp(stepped, self.minval, self.maxval)
class EquilibriumProxGrad(nn.Module):
    """Proximal-gradient fixed-point map: gradient step on the data term, a
    learned residual correction, then a clamp to [minval, maxval]. The step
    size ``eta`` is a learnable scalar parameter."""

    def __init__(self, linear_operator, nonlinear_operator, eta, minval=-1, maxval=1):
        super(EquilibriumProxGrad, self).__init__()
        self.linear_op = linear_operator
        self.nonlinear_op = nonlinear_operator
        self.minval = minval
        self.maxval = maxval
        self.register_parameter(name='eta',
                                param=torch.nn.Parameter(torch.tensor(eta), requires_grad=True))
        # Surface any learnable parameters of the linear operator on this module.
        for idx, param in enumerate(self.linear_op.parameters()):
            self.register_parameter(name='linear_param_' + str(idx), param=param)

    def _linear_op(self, x):
        return self.linear_op.forward(x)

    def _linear_adjoint(self, x):
        return self.linear_op.adjoint(x)

    def get_gradient(self, z, y):
        return self.linear_op.gramian(z) - self._linear_adjoint(y)

    def forward(self, z, y):
        stepped = z - self.eta * self.get_gradient(z, y)
        out = stepped + self.nonlinear_op(stepped)
        return torch.clamp(out, self.minval, self.maxval)
class EquilibriumProxGradMRI(nn.Module):
    """Proximal-gradient fixed-point map for MRI: like EquilibriumProxGrad but
    with a fixed (non-learnable) step size ``eta``."""

    def __init__(self, linear_operator, nonlinear_operator, eta, minval=-1, maxval=1):
        super(EquilibriumProxGradMRI, self).__init__()
        self.linear_op = linear_operator
        self.nonlinear_op = nonlinear_operator
        self.minval = minval
        self.maxval = maxval
        self.eta = eta  # plain attribute: step size is not trained here
        # Surface any learnable parameters of the linear operator on this module.
        for idx, param in enumerate(self.linear_op.parameters()):
            self.register_parameter(name='linear_param_' + str(idx), param=param)

    def _linear_op(self, x):
        return self.linear_op.forward(x)

    def _linear_adjoint(self, x):
        return self.linear_op.adjoint(x)

    def get_gradient(self, z, y):
        return self.linear_op.gramian(z) - self._linear_adjoint(y)

    def forward(self, z, y):
        stepped = z - self.eta * self.get_gradient(z, y)
        out = stepped + self.nonlinear_op(stepped)
        return torch.clamp(out, self.minval, self.maxval)
class ProxPnP(nn.Module):
    """Plug-and-play proximal-gradient map: gradient step on 0.5||Az - y||^2
    followed by a learned residual denoiser (no clamping; ``minval``/``maxval``
    are kept for interface compatibility). ``eta`` is a fixed step size."""

    def __init__(self, linear_operator, nonlinear_operator, eta, minval=-1, maxval=1):
        super(ProxPnP, self).__init__()
        self.linear_op = linear_operator
        self.nonlinear_op = nonlinear_operator
        self.minval = minval
        self.maxval = maxval
        self.eta = eta

    def _linear_op(self, x):
        return self.linear_op.forward(x)

    def _linear_adjoint(self, x):
        return self.linear_op.adjoint(x)

    def get_gradient(self, z, y):
        return self.linear_op.adjoint(self.linear_op.forward(z) - y)

    def forward(self, z, y):
        stepped = z - self.eta * (self.linear_op.adjoint(self.linear_op.forward(z))
                                  - self.linear_op.adjoint(y))
        return stepped + self.nonlinear_op(stepped)
class DouglasRachford(nn.Module):
    """One averaged Douglas-Rachford splitting iteration.

    The data-fidelity proximal step is computed with the file-level
    ``conjugate_gradient`` solver (presumably solving the regularized
    normal equations -- defined elsewhere in this module); the
    regularizer's step is a learned residual network applied to the
    reflected point.
    """

    def __init__(self, linear_operator, nonlinear_operator, eta, max_iters=10, minval=-1, maxval=1):
        super(DouglasRachford, self).__init__()
        self.linear_op = linear_operator        # forward model A (provides adjoint/gramian)
        self.nonlinear_op = nonlinear_operator  # learned residual network
        self.minval = minval
        self.maxval = maxval
        self.lambdaval = eta                    # proximal weight used in the CG solve
        self.max_cg_iterations = max_iters

    def _linear_op(self, x):
        """Apply the forward operator A."""
        return self.linear_op.forward(x)

    def internal_prox(self, x, y):
        """Data-term prox at ``x``: CG solve seeded with A^T y + lambda * x."""
        rhs = self.linear_op.adjoint(y) + self.lambdaval * x
        return conjugate_gradient(rhs, self.linear_op.gramian, self.lambdaval,
                                  n_iterations=self.max_cg_iterations)

    def get_gradient(self, z, y):
        """A^T (A z - y); not used by ``forward`` but kept for interface parity."""
        return self.linear_op.adjoint(self.linear_op.forward(z) - y)

    def forward(self, z, y):
        """Averaged DR update: prox, reflect, learned correction, clamp."""
        prox_point = self.internal_prox(z, y)
        reflected = 2 * prox_point - z
        correction = self.nonlinear_op(reflected)
        # Same arithmetic (and FP evaluation order) as the original update.
        averaged = (z + 2 * (correction + reflected) - reflected) / 2.0
        return torch.clamp(averaged, self.minval, self.maxval)
class EquilibriumADMM(nn.Module):
    """One ADMM iteration with a CG-based x-update and a learned residual
    denoiser as the z-update (z <- v + net(v) with v = x + u).

    ``conjugate_gradient`` is the file-level solver defined elsewhere in
    this module.
    """

    def __init__(self, linear_operator, denoising_net, max_cg_iterations=20, x_alpha=0.4, eta=0.1, minval=-1, maxval=1):
        super(EquilibriumADMM, self).__init__()
        self.linear_op = linear_operator    # forward model A (provides adjoint/gramian)
        self.denoising_net = denoising_net  # learned residual denoiser
        self.minval = minval
        self.maxval = maxval
        self.x_alpha = x_alpha              # quadratic coupling weight in the x-update
        self.eta = eta                      # dual (u) step size
        self.max_cg_iters = max_cg_iterations
        # If the measurement operator itself carries learnable tensors,
        # expose them to the optimizer under predictable names.
        for idx, param in enumerate(self.linear_op.parameters()):
            self.register_parameter(name='linear_param_' + str(idx), param=param)

    def _linear_op(self, x):
        """Apply the forward operator A."""
        return self.linear_op.forward(x)

    def _linear_adjoint(self, x):
        """Apply the adjoint operator A^T."""
        return self.linear_op.adjoint(x)

    def _x_update(self, z, u, y):
        """x-step: CG solve seeded with A^T y + x_alpha * (z - u)."""
        rhs = self._linear_adjoint(y) + self.x_alpha * (z - u)
        solved = conjugate_gradient(rhs, self.linear_op.gramian, self.x_alpha,
                                    n_iterations=self.max_cg_iters)
        return solved, z, u

    def _z_update(self, x, z, u):
        """z-step: learned residual correction of v = x + u (old z is discarded)."""
        net_input = x + u
        return x, net_input + self.denoising_net(net_input), u

    def _u_update(self, x, z, u):
        """Scaled dual ascent: u <- u + eta * (x - z)."""
        return x, z, u + self.eta * (x - z)

    def forward(self, z, u, y):
        """Run the x, z, u updates in order; return clamped z and new u."""
        x_est, z, u = self._x_update(z, u, y)
        x_est, z_est, u = self._z_update(x_est, z, u)
        x_est, z_est, u_est = self._u_update(x_est, z_est, u)
        return torch.clamp(z_est, self.minval, self.maxval), u_est
class EquilibriumADMM2(nn.Module):
    """Variant of ``EquilibriumADMM`` whose z-update SUBTRACTS the network
    output (z <- v - net(v) with v = x + u) instead of adding it.

    ``conjugate_gradient`` is the file-level solver defined elsewhere in
    this module.
    """

    def __init__(self, linear_operator, denoising_net, max_cg_iterations=20, x_alpha=0.4, eta=0.1, minval=-1, maxval=1):
        super(EquilibriumADMM2, self).__init__()
        self.linear_op = linear_operator    # forward model A (provides adjoint/gramian)
        self.denoising_net = denoising_net  # learned residual denoiser
        self.minval = minval
        self.maxval = maxval
        self.x_alpha = x_alpha              # quadratic coupling weight in the x-update
        self.eta = eta                      # dual (u) step size
        self.max_cg_iters = max_cg_iterations
        # If the measurement operator itself carries learnable tensors,
        # expose them to the optimizer under predictable names.
        for idx, param in enumerate(self.linear_op.parameters()):
            self.register_parameter(name='linear_param_' + str(idx), param=param)

    def _linear_op(self, x):
        """Apply the forward operator A."""
        return self.linear_op.forward(x)

    def _linear_adjoint(self, x):
        """Apply the adjoint operator A^T."""
        return self.linear_op.adjoint(x)

    def _x_update(self, z, u, y):
        """x-step: CG solve seeded with A^T y + x_alpha * (z - u)."""
        rhs = self._linear_adjoint(y) + self.x_alpha * (z - u)
        solved = conjugate_gradient(rhs, self.linear_op.gramian, self.x_alpha,
                                    n_iterations=self.max_cg_iters)
        return solved, z, u

    def _z_update(self, x, z, u):
        """z-step: v - net(v) with v = x + u (old z is discarded)."""
        net_input = x + u
        return x, net_input - self.denoising_net(net_input), u

    def _u_update(self, x, z, u):
        """Scaled dual ascent: u <- u + eta * (x - z)."""
        return x, z, u + self.eta * (x - z)

    def forward(self, z, u, y):
        """Run the x, z, u updates in order; return clamped z and new u."""
        x_est, z, u = self._x_update(z, u, y)
        x_est, z_est, u = self._z_update(x_est, z, u)
        x_est, z_est, u_est = self._u_update(x_est, z_est, u)
        return torch.clamp(z_est, self.minval, self.maxval), u_est
class EquilibriumADMM_minus(nn.Module):
    """ADMM variant that swaps the roles of the two updates relative to
    ``EquilibriumADMM``: the x-step is the learned correction
    (x <- v - net(v) with v = z - u) and the z-step is the CG solve.

    ``conjugate_gradient`` is the file-level solver defined elsewhere in
    this module.
    """

    def __init__(self, linear_operator, denoising_net, max_cg_iterations=20, x_alpha=0.4, eta=0.1, minval=-1, maxval=1):
        super(EquilibriumADMM_minus, self).__init__()
        self.linear_op = linear_operator    # forward model A (provides adjoint/gramian)
        self.denoising_net = denoising_net  # learned residual denoiser
        self.minval = minval
        self.maxval = maxval
        self.x_alpha = x_alpha              # quadratic coupling weight in the z-update
        self.eta = eta                      # dual (u) step size
        self.max_cg_iters = max_cg_iterations
        # If the measurement operator itself carries learnable tensors,
        # expose them to the optimizer under predictable names.
        for idx, param in enumerate(self.linear_op.parameters()):
            self.register_parameter(name='linear_param_' + str(idx), param=param)

    def _linear_op(self, x):
        """Apply the forward operator A."""
        return self.linear_op.forward(x)

    def _linear_adjoint(self, x):
        """Apply the adjoint operator A^T."""
        return self.linear_op.adjoint(x)

    def _x_update(self, z, u, y):
        """x-step: v - net(v) with v = z - u (``y`` unused; kept for a uniform signature)."""
        net_input = z - u
        return net_input - self.denoising_net(net_input), z, u

    def _z_update(self, x, u, y):
        """z-step: CG solve seeded with A^T y + x_alpha * (x + u)."""
        rhs = self._linear_adjoint(y) + self.x_alpha * (x + u)
        solved = conjugate_gradient(rhs, self.linear_op.gramian, self.x_alpha,
                                    n_iterations=self.max_cg_iters)
        return x, solved, u

    def _u_update(self, x, z, u):
        """Scaled dual ascent: u <- u + eta * (x - z)."""
        return x, z, u + self.eta * (x - z)

    def forward(self, z, u, y):
        """Run the x, z, u updates in order; return clamped z and new u."""
        x_est, z, u = self._x_update(z, u, y)
        x_est, z_est, u = self._z_update(x_est, u, y)
        x_est, z_est, u_est = self._u_update(x_est, z_est, u)
        return torch.clamp(z_est, self.minval, self.maxval), u_est
class EquilibriumADMM_plus(nn.Module):
    """Counterpart of ``EquilibriumADMM_minus`` whose x-step ADDS the
    network output (x <- v + net(v) with v = z - u); the z-step is the
    CG solve.

    ``conjugate_gradient`` is the file-level solver defined elsewhere in
    this module.
    """

    def __init__(self, linear_operator, denoising_net, max_cg_iterations=20, x_alpha=0.4, eta=0.1, minval=-1, maxval=1):
        super(EquilibriumADMM_plus, self).__init__()
        self.linear_op = linear_operator    # forward model A (provides adjoint/gramian)
        self.denoising_net = denoising_net  # learned residual denoiser
        self.minval = minval
        self.maxval = maxval
        self.x_alpha = x_alpha              # quadratic coupling weight in the z-update
        self.eta = eta                      # dual (u) step size
        self.max_cg_iters = max_cg_iterations
        # If the measurement operator itself carries learnable tensors,
        # expose them to the optimizer under predictable names.
        for idx, param in enumerate(self.linear_op.parameters()):
            self.register_parameter(name='linear_param_' + str(idx), param=param)

    def _linear_op(self, x):
        """Apply the forward operator A."""
        return self.linear_op.forward(x)

    def _linear_adjoint(self, x):
        """Apply the adjoint operator A^T."""
        return self.linear_op.adjoint(x)

    def _x_update(self, z, u, y):
        """x-step: v + net(v) with v = z - u (``y`` unused; kept for a uniform signature)."""
        net_input = z - u
        return net_input + self.denoising_net(net_input), z, u

    def _z_update(self, x, u, y):
        """z-step: CG solve seeded with A^T y + x_alpha * (x + u)."""
        rhs = self._linear_adjoint(y) + self.x_alpha * (x + u)
        solved = conjugate_gradient(rhs, self.linear_op.gramian, self.x_alpha,
                                    n_iterations=self.max_cg_iters)
        return x, solved, u

    def _u_update(self, x, z, u):
        """Scaled dual ascent: u <- u + eta * (x - z)."""
        return x, z, u + self.eta * (x - z)

    def forward(self, z, u, y):
        """Run the x, z, u updates in order; return clamped z and new u."""
        x_est, z, u = self._x_update(z, u, y)
        x_est, z_est, u = self._z_update(x_est, u, y)
        x_est, z_est, u_est = self._u_update(x_est, z_est, u)
        return torch.clamp(z_est, self.minval, self.maxval), u_est
deep_equilibrium_inverse | deep_equilibrium_inverse-main/utils/fastmri_dataloader.py | import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
import os, re, random, h5py, ismrmrd
from PIL import Image
from torch.utils.data import Dataset
from utils import forward_models_mri
def directory_filelist(target_directory):
file_list = [f for f in os.listdir(target_directory)
if os.path.isfile(os.path.join(target_directory, f))]
file_list = list(file_list)
file_list = [f for f in file_list if not f.startswith('.')]
return file_list
def to_tensor(data):
"""
Convert numpy array to PyTorch tensor. For complex arrays, the real and imaginary parts
are stacked along the last dimension.
Args:
data (np.array): Input numpy array
Returns:
torch.Tensor: PyTorch version of data
"""
if np.iscomplexobj(data):
data = np.stack((data.real, data.imag), axis=-1)
return torch.from_numpy(data)
def center_crop_slice(data, shape):
"""
Apply a center crop to the input real image or batch of real images.
Args:
data (torch.Tensor): The input tensor to be center cropped. It should have at
least 2 dimensions and the cropping is applied along the last two dimensions.
shape (int, int): The output shape. The shape should be smaller than the
corresponding dimensions of data.
Returns:
torch.Tensor: The center cropped image
"""
assert 0 < shape[0] <= data.shape[0]
assert 0 < shape[1] <= data.shape[1]
w_from = (data.shape[0] - shape[0]) // 2
h_from = (data.shape[1] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
return data[w_from:w_to, h_from:h_to, ...]
def complex_abs(data):
"""
Compute the absolute value of a complex valued input tensor.
Args:
data (torch.Tensor): A complex valued tensor, where the size of the final dimension
should be 2.
Returns:
torch.Tensor: Absolute value of data
"""
assert data.size(-1) == 2
return (data ** 2).sum(dim=-1).sqrt()
def normalize(
data,
mean,
stddev,
eps = 0.0,
):
"""
Normalize the given tensor.
Applies the formula (data - mean) / (stddev + eps).
Args:
data: Input data to be normalized.
mean: Mean value.
stddev: Standard deviation.
eps: Added to stddev to prevent dividing by zero.
Returns:
Normalized tensor.
"""
return (data - mean) / (stddev + eps)
def normalize_instance(
data, eps = 0.0):
"""
Normalize the given tensor with instance norm/
Applies the formula (data - mean) / (stddev + eps), where mean and stddev
are computed from the data itself.
Args:
data: Input data to be normalized
eps: Added to stddev to prevent dividing by zero.
Returns:
torch.Tensor: Normalized tensor
"""
mean = data.mean()
std = data.std()
return normalize(data, mean, std, eps), mean, std
class singleCoilFastMRIDataloader(Dataset):
def __init__(self, dataset_location, transform=None, data_indices=None, sketchynormalize=True):
"""
Args:
mask_func (common.subsample.MaskFunc): A function that can create a mask of
appropriate shape.
resolution (int): Resolution of the image.
use_seed (bool): If true, this class computes a pseudo random number generator seed
from the filename. This ensures that the same mask is used for all the slices of
a given volume every time.
"""
self.transform = transform
if data_indices is not None:
filelist = directory_filelist(dataset_location)
# print(filelist[0])
print(len(filelist))
try:
self.filelist = [filelist[x] for x in data_indices]
self.filelist[0] = 'file1002332_23.h5'
self.filelist[0] = 'file1002444_18.h5'
except IndexError:
print(data_indices)
exit()
else:
self.filelist = directory_filelist(dataset_location)
self.data_directory = dataset_location
self.fft = forward_models_mri.toKspace()
self.ifft = forward_models_mri.fromKspace()
self.sketchynormalize = sketchynormalize
def __len__(self):
return len(self.filelist)
def __getitem__(self, item):
"""
Args:
kspace (numpy.array): Input k-space of shape (num_coils, rows, cols, 2) for multi-coil
data or (rows, cols, 2) for single coil data.
mask (numpy.array): Mask from the test dataset
target (numpy.array): Target image
attrs (dict): Acquisition related information stored in the HDF5 object.
fname (str): File name
slice (int): Serial number of the slice.
Returns:
(tuple): tuple containing:
image (torch.Tensor): Zero-filled input image.
target (torch.Tensor): Target image converted to a torch Tensor.
mean (float): Mean value used for normalization.
std (float): Standard deviation value used for normalization.
"""
filename = self.filelist[item]
data = h5py.File(self.data_directory + filename, 'r')
# print(str(item) + ": " + str(filename))
# exit()w
kspace = to_tensor(data.get('kspace').value)
kspace_cropped = center_crop_slice(kspace, shape=[320, 320])#.permute((2,0,1))
input_img = forward_models_mri.ifft2(kspace_cropped).permute((2,0,1))
image_space = forward_models_mri.ifft2(kspace)
target_img = center_crop_slice(image_space, shape=[320, 320]).permute((2,0,1))
# image = complex_abs(image_space).permute((2,0,1))
target_img, mean, std = normalize_instance(target_img, eps=1e-11)
target_img = target_img.clamp(-6, 6)
input_img, mean, std = normalize_instance(input_img, eps=1e-11)
input_img = input_img.clamp(-6, 6)
# if self.sketchynormalize:
# don't ask
# image_space *= 666.666
# image_space *= 2000
# image_space = image_space.clamp(min=-1, max=1)
return input_img, target_img
| 6,291 | 35.581395 | 99 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/utils/celeba_dataloader.py | import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
import os, re, random
from PIL import Image
def swap_patches(batch, index1, index2, h,w, patch_top_loc, patch_left_loc):
tmp = batch[
index1,
patch_top_loc:patch_top_loc+h,
patch_left_loc:patch_left_loc+w, :].clone()
batch[
index1,
patch_top_loc:patch_top_loc+h,
patch_left_loc:patch_left_loc+w, :] = batch[
index2,
patch_top_loc:patch_top_loc+h,
patch_left_loc:patch_left_loc+w, :].clone()
batch[
index1,
patch_top_loc:patch_top_loc+h,
patch_left_loc:patch_left_loc+w, :] = tmp
return batch
#TODO this should probably be moved to another file as it's a transform
# Although it needs to operate on the batch as opposed to individual images
def batch_patch_swap(batch, h=None, w=None, ):
'''
Args:
batch: batch x h x w x ch of images of type torch tensor
h: height of patch to be swapped, if greater than batch.size()[1] then will be capped to that
w: width of patch, similar capping will be done as height
'''
#TODO maybe we specify h and w as a range instead?
num_images, height, width, _ = batch.size()
#TODO maybe crop from different locations of the image?
patch_top_loc = random.randint(1, int((3.0/4)*height))
patch_left_loc = random.randint(1, int((3.0/4)*width))
if (not h) or (h >= height) or (h + patch_top_loc >= height):
h = random.randint(1, height-patch_top_loc)
if (not w) or (w >= width):
h = random.randint(1, width-patch_left_loc)
for index in range(num_images):
swap_index = random.randint(0, num_images-1)
batch = swap_patches(batch, index, swap_index, h,w, patch_top_loc, patch_left_loc)
return batch
def sorted_nicely(l):
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted(l, key=alphanum_key)
def directory_filelist(target_directory):
file_list = [f for f in sorted(os.listdir(target_directory))
if os.path.isfile(os.path.join(target_directory, f))]
file_list = list(file_list)
file_list = [f for f in file_list if not f.startswith('.')]
return file_list
def load_img(file_name):
with open(file_name,'rb') as f:
img = Image.open(f).convert("RGB")
return img
class FolderDataset(Dataset):
def __init__(self, target_directory, transform=None):
filelist = directory_filelist(target_directory)
self.full_filelist = [target_directory + single_file for single_file in filelist]
self.transform = transform
def __len__(self):
return len(self.full_filelist)
def __getitem__(self, item):
image_name = self.full_filelist[item]
data = load_img(image_name)
if self.transform is not None:
data = self.transform(data)
return data
class CelebaDataset(Dataset):
def __init__(self, target_directory, validation_data=False, transform=None):
filelist = directory_filelist(target_directory)
training_data = filelist[:162770]
val_data = filelist[162770:182638]
test_data = filelist[182638:]
if validation_data:
self.full_filelist = [target_directory + single_file for single_file in val_data]
else:
self.full_filelist = [target_directory + single_file for single_file in training_data]
self.transform = transform
def __len__(self):
return len(self.full_filelist)
def __getitem__(self, item):
image_name = self.full_filelist[item]
data = load_img(image_name)
if self.transform is not None:
data = self.transform(data)
return data
class CelebaTrainingDatasetSubset(Dataset):
def __init__(self, target_directory, subset_indices, transform=None):
filelist = directory_filelist(target_directory)
training_data = filelist[:162770]
try:
training_data = [training_data[x] for x in subset_indices]
except TypeError:
training_data = [training_data[subset_indices]]
self.full_filelist = [target_directory + single_file for single_file in training_data]
self.transform = transform
def __len__(self):
return len(self.full_filelist)
def __getitem__(self, item):
image_name = self.full_filelist[item]
data = load_img(image_name)
if self.transform is not None:
data = self.transform(data)
return data
# This can be removed I think it's the same class as the one above at least right now.
class CelebaTestDataset(Dataset):
def __init__(self, target_directory, transform=None):
filelist = directory_filelist(target_directory)
training_data = filelist[:162770]
val_data = filelist[162770:182638]
test_data = filelist[182638:]
self.full_filelist = [target_directory + single_file for single_file in test_data]
self.transform = transform
def __len__(self):
return len(self.full_filelist)
def __getitem__(self, item):
image_name = self.full_filelist[item]
data = load_img(image_name)
if self.transform is not None:
data = self.transform(data)
return data
| 5,423 | 33.769231 | 101 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/utils/testing_utils.py | from PIL import Image
import torch
import matplotlib.pyplot as plt
import numpy as np
import imageio
from PIL import Image
def save_tensor_as_color_img(img_tensor, filename):
np_array = img_tensor.cpu().detach().numpy()
imageio.save(filename, np_array)
def save_batch_as_color_imgs(tensor_batch, batch_size, ii, folder_name, names):
# img_array = (np.transpose(tensor_batch.cpu().detach().numpy(),(0,2,3,1)) + 1.0) * 127.5
img_array = (np.clip(np.transpose(tensor_batch.cpu().detach().numpy(),(0,2,3,1)),-1,1) + 1.0) * 127.5
# img_array = tensor_batch.cpu().detach().numpy()
# print(np.max(img_array[:]))
# print(np.min(img_array[:]))
img_array = img_array.astype(np.uint8)
for kk in range(batch_size):
desired_img = Image.fromarray(img_array[kk,...])
desired_img = desired_img.resize((512,512), resample=Image.NEAREST)
img_number = batch_size*ii + kk
filename = folder_name + str(img_number) + "_" + str(names[kk]) + ".png"
# print(np.shape(img_array))
# print(filename)
imageio.imwrite(filename, desired_img)
def save_mri_as_imgs(tensor_batch, batch_size, ii, folder_name, names):
# img_array = (np.transpose(tensor_batch.cpu().detach().numpy(),(0,2,3,1)) + 1.0) * 127.5
def rescale_to_01(input):
batch_size = input.shape[0]
for bb in range(batch_size):
flattened_img = torch.flatten(input[bb, ...], start_dim=0)
img_min = torch.min(flattened_img)
img_max = torch.max(flattened_img - img_min)
input[bb, ...] = (input[bb, ...] - img_min) / img_max
return input
tensor_batch = torch.norm(tensor_batch, dim=1)
tensor_batch = rescale_to_01(tensor_batch)
# img_array = torch.norm(tensor_batch, dim=1).cpu().detach().numpy()
img_array = tensor_batch.cpu().detach().numpy()
for kk in range(batch_size):
img_number = batch_size*ii + kk
target_img = img_array[kk,...] * 255.0
target_img = target_img.astype(np.uint8)
desired_img = Image.fromarray(target_img)
desired_img = desired_img.resize((512, 512), resample=Image.NEAREST)
filename = folder_name + str(img_number) + "_" + str(names[kk]) + ".png"
# plt.imshow(np.sqrt(img_array[kk,0,:,:]**2 + img_array[kk,1,:,:]**2))
# plt.gray()
# plt.xticks([])
# plt.yticks([])
# plt.savefig(filename, bbox_inches='tight')
imageio.imwrite(filename, desired_img, format="PNG-PIL")
| 2,513 | 40.9 | 106 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/utils/spectral_norm.py | """
Spectral Normalization borrowed from https://arxiv.org/abs/1802.05957
Real SN by convolution. Each layer has lipschtz constant of 1
"""
import torch
from torch.nn.functional import conv2d, conv_transpose2d
from torch.nn.parameter import Parameter
# import argparse
# from ..train_realSN import opt
# import torch.jit._unwrap_optional
def normalize(tensor, eps=1e-12):
norm = torch.sqrt(torch.sum(tensor * tensor))
# if out is None:
# ret = tensor / norm
# else:
# ret = torch.div(tensor, norm, out=torch.jit._unwrap_optional(out))
ans = tensor / (eps + norm)
return ans
class SpectralNorm(object):
def __init__(self, name='weight', n_power_iterations=1, dim=0, eps=1e-12):
self.name = name
self.dim = dim
if n_power_iterations <= 0:
raise ValueError('Expected n_power_iterations to be positive, but '
'got n_power_iterations={}'.format(n_power_iterations))
self.n_power_iterations = n_power_iterations
self.eps = eps
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
u = getattr(module, self.name + '_u')
weight_mat = weight
# out, In, kernel0, kernel1 = weight_mat.shape
'''
if self.dim != 0:
# permute dim to front
weight_mat = weight_mat.permute(self.dim,
*[d for d in range(weight_mat.dim()) if d != self.dim])
height = weight_mat.size(0)
weight_mat = weight_mat.reshape(height, -1)
'''
with torch.no_grad():
for _ in range(self.n_power_iterations):
# Spectral norm of weight equals to `u^T W v`, where `u` and `v`
# are the first left and right singular vectors.
# This power iteration produces approximations of `u` and `v`.
# v = normalize(torch.matmul(weight_mat.t(), u), dim=0, eps=self.eps)
# u = normalize(torch.matmul(weight_mat, v), dim=0, eps=self.eps)
# v = normalize(conv2d(u, weight_mat.permute(1,0,2,3), padding=1), dim=0, eps=self.eps)
# u = normalize(conv2d(v, weight_mat, padding=1), dim=0, eps=self.eps)
v = normalize(conv2d(u.flip(2,3), weight_mat.permute(1, 0, 2, 3), padding=2),
eps=self.eps).flip(2,3)[:,:,1:-1,1:-1]
u = normalize(conv2d(v, weight_mat, padding=1), eps=self.eps)
if self.n_power_iterations > 0:
# See above on why we need to clone
u = u.clone()
v = v.clone()
# sigma = torch.dot(u, conv2d(v, weight_mat, padding=1))
# sigma = torch.sum(torch.matmul(u, conv2d(v, weight_mat, padding=1)))
sigma = torch.sum(u * conv2d(v, weight_mat, padding=1))
weight = weight / sigma
weight = weight * pow(0.3, 1.0/17.0) # omit this (comment out) if lip constant = 1
return weight, u
def remove(self, module):
weight = getattr(module, self.name)
delattr(module, self.name)
delattr(module, self.name + '_u')
# delattr(module, self.name + '_v')
delattr(module, self.name + '_orig')
module.register_parameter(self.name, torch.nn.Parameter(weight.detach()))
def __call__(self, module, inputs):
if module.training:
weight, u = self.compute_weight(module)
setattr(module, self.name, weight)
setattr(module, self.name + '_u', u)
else:
r_g = getattr(module, self.name + '_orig').requires_grad
getattr(module, self.name).detach_().requires_grad_(r_g)
@staticmethod
def apply(module, name, n_power_iterations, dim, eps, out_channels=64):
fn = SpectralNorm(name, n_power_iterations, dim, eps)
weight = module._parameters[name]
height = weight.size(dim)
# u = normalize(weight.new_empty(height).normal_(0, 1), dim=0, eps=fn.eps)
if module.weight.shape[0] == 1:
C_out = 1
else:
C_out = out_channels
# u = normalize(weight.new_empty(batch_size, C_out , 40, 40).normal_(0, 1), dim=0, eps=fn.eps)
u = normalize(weight.new_empty(1, C_out, 40, 40).normal_(0, 1), eps=fn.eps)# input size
delattr(module, fn.name)
module.register_parameter(fn.name + "_orig", weight)
# We still need to assign weight back as fn.name because all sorts of
# things may assume that it exists, e.g., when initializing weights.
# However, we can't directly assign as it could be an nn.Parameter and
# gets added as a parameter. Instead, we register weight.data as a
# buffer, which will cause weight to be included in the state dict
# and also supports nn.init due to shared storage.
module.register_buffer(fn.name, weight.data)
module.register_buffer(fn.name + "_u", u)
module.register_forward_pre_hook(fn)
return fn
def spectral_norm(module, name='weight', n_power_iterations=1, eps=1e-12, dim=None, out_channels=None):
r"""Applies spectral normalization to a parameter in the given module.
.. math::
\mathbf{W} &= \dfrac{\mathbf{W}}{\sigma(\mathbf{W})} \\
\sigma(\mathbf{W}) &= \max_{\mathbf{h}: \mathbf{h} \ne 0} \dfrac{\|\mathbf{W} \mathbf{h}\|_2}{\|\mathbf{h}\|_2}
Spectral normalization stabilizes the training of discriminators (critics)
in Generaive Adversarial Networks (GANs) by rescaling the weight tensor
with spectral norm :math:`\sigma` of the weight matrix calculated using
power iteration method. If the dimension of the weight tensor is greater
than 2, it is reshaped to 2D in power iteration method to get spectral
norm. This is implemented via a hook that calculates spectral norm and
rescales weight before every :meth:`~Module.forward` call.
See `Spectral Normalization for Generative Adversarial Networks`_ .
.. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957
Args:
module (nn.Module): containing module
name (str, optional): name of weight parameter
n_power_iterations (int, optional): number of power iterations to
calculate spectal norm
eps (float, optional): epsilon for numerical stability in
calculating norms
dim (int, optional): dimension corresponding to number of outputs,
the default is 0, except for modules that are instances of
ConvTranspose1/2/3d, when it is 1
Returns:
The original module with the spectal norm hook
Example::
>>> m = spectral_norm(nn.Linear(20, 40))
Linear (20 -> 40)
>>> m.weight_u.size()
torch.Size([20])
"""
if dim is None:
if isinstance(module, (torch.nn.ConvTranspose1d,
torch.nn.ConvTranspose2d,
torch.nn.ConvTranspose3d)):
dim = 1
else:
dim = 0
if out_channels is not None:
SpectralNorm.apply(module, name, n_power_iterations, dim, eps, out_channels=out_channels)
else:
SpectralNorm.apply(module, name, n_power_iterations, dim, eps)
return module
def remove_spectral_norm(module, name='weight'):
r"""Removes the spectral normalization reparameterization from a module.
Args:
module (nn.Module): containing module
name (str, optional): name of weight parameter
Example:
>>> m = spectral_norm(nn.Linear(40, 10))
>>> remove_spectral_norm(m)
"""
for k, hook in module._forward_pre_hooks.items():
if isinstance(hook, SpectralNorm) and hook.name == name:
hook.remove(module)
del module._forward_pre_hooks[k]
return module
raise ValueError("spectral_norm of '{}' not found in {}".format(
name, module))
class ConvSpectralNorm(object):
def __init__(self, name='weight', sigma=1.0, n_power_iterations=1,
dim=0, eps=1e-12, leakflag=False, kernelsize=3):
self.name = name
self.sigma = sigma
self.dim = dim
if n_power_iterations <= 0:
raise ValueError('Expected n_power_iterations to be positive, but '
'got n_power_iterations={}'.format(n_power_iterations))
self.n_power_iterations = n_power_iterations
self.eps = eps
self.leakflag=leakflag
self.kernelsize = kernelsize
def normalize(self, tensor, eps=1e-12):
norm = torch.sqrt(torch.sum(tensor * tensor))
# if out is None:
# ret = tensor / norm
# else:
# ret = torch.div(tensor, norm, out=torch.jit._unwrap_optional(out))
ans = tensor / (eps + norm)
return ans
def pad(self, tensor):
padding = [0,1,0,1]
return torch.nn.functional.pad(tensor, pad=padding, mode="reflect")
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
u = getattr(module, self.name + '_u')
weight_mat = weight
if self.kernelsize < 2:
padding = 0
else:
padding = 1
with torch.no_grad():
for _ in range(self.n_power_iterations):
# Spectral norm of weight equals to `u^T W v`, where `u` and `v`
# are the first left and right singular vectors.
# This power iteration produces approximations of `u` and `v`.
if not self.leakflag:
v = self.normalize(conv2d(u.flip(2, 3),
weight_mat.permute(1, 0, 2, 3),
padding=padding),
eps=self.eps).flip(2, 3)
u = self.normalize(conv2d(v, weight_mat, padding=padding), eps=self.eps)
else:
v = self.normalize(conv_transpose2d(self.pad(u.flip(2, 3)),
weight_mat.permute(1, 0, 2, 3), padding=padding),
eps=self.eps).flip(2, 3)
u = self.normalize(conv_transpose2d(self.pad(v), weight_mat, padding=padding), eps=self.eps)
# print(u.shape, flush=True)
if self.n_power_iterations > 0:
# See above on why we need to clone
u = u.clone()
v = v.clone()
if not self.leakflag:
cur_sigma = torch.sum(u * conv2d(v, weight_mat, padding=padding))
else:
cur_sigma = torch.sum(u * conv_transpose2d(self.pad(v), weight_mat, padding=padding))
weight = weight / cur_sigma * self.sigma
return weight, u, cur_sigma
def remove(self, module):
weight = getattr(module, self.name)
delattr(module, self.name)
delattr(module, self.name + '_u')
# delattr(module, self.name + '_v')
delattr(module, self.name + '_orig')
module.register_parameter(self.name, torch.nn.Parameter(weight.detach()))
def __call__(self, module, inputs):
if module.training:
weight, u, cur_sigma = self.compute_weight(module)
setattr(module, self.name, weight)
setattr(module, self.name + '_u', u)
else:
r_g = getattr(module, self.name + '_orig').requires_grad
getattr(module, self.name).detach_().requires_grad_(r_g)
@staticmethod
def apply(module, name, sigma, n_power_iterations, dim, eps, out_channels, leakflag, kernelsize):
fn = ConvSpectralNorm(name, sigma, n_power_iterations, dim, eps, leakflag, kernelsize)
weight = module._parameters[name]
height = weight.size(dim)
if module.weight.shape[0] == 1:
C_out = 1
else:
C_out = out_channels
u = normalize(weight.new_empty(1, C_out, 40, 40).normal_(0, 1), eps=fn.eps)# input size
delattr(module, fn.name)
module.register_parameter(fn.name + "_orig", weight)
# We still need to assign weight back as fn.name because all sorts of
# things may assume that it exists, e.g., when initializing weights.
# However, we can't directly assign as it could be an nn.Parameter and
# gets added as a parameter. Instead, we register weight.data as a
# buffer, which will cause weight to be included in the state dict
# and also supports nn.init due to shared storage.
module.register_buffer(fn.name, weight.data)
module.register_buffer(fn.name + "_u", u)
module.register_forward_pre_hook(fn)
return fn
def conv_spectral_norm(module, name='weight', sigma=1.0, n_power_iterations=1,
                       eps=1e-12, dim=None, out_channels=64, leakflag=False, kernelsize=3):
    """Register a convolutional spectral-norm hook on `module` and return it.

    The hook rescales ``module.<name>`` before every forward call so that the
    spectral norm of the convolution operator (estimated via power iteration)
    is bounded by `sigma`. `dim` defaults to 1 for transposed convolutions and
    0 otherwise. See "Spectral Normalization for Generative Adversarial
    Networks", https://arxiv.org/abs/1802.05957.
    """
    if dim is None:
        transposed = isinstance(module, (torch.nn.ConvTranspose1d,
                                         torch.nn.ConvTranspose2d,
                                         torch.nn.ConvTranspose3d))
        dim = 1 if transposed else 0
    ConvSpectralNorm.apply(module, name, sigma, n_power_iterations, dim, eps,
                           out_channels, leakflag, kernelsize)
    return module
def remove_conv_spectral_norm(module, name='weight'):
    """Undo `conv_spectral_norm`, restoring ``<name>`` as a plain Parameter.

    Raises ValueError if no matching hook is registered on `module`.
    """
    hooks = module._forward_pre_hooks
    for key in list(hooks):
        hook = hooks[key]
        if isinstance(hook, ConvSpectralNorm) and hook.name == name:
            hook.remove(module)
            del hooks[key]
            return module
    raise ValueError("spectral_norm of '{}' not found in {}".format(
        name, module))
class BatchNormSpectralNorm(object):
    """Pre-forward hook bounding the effective gain of a BatchNorm layer.

    BN's per-channel gain is ``weight / sqrt(running_var)``; both weight and
    bias are divided by ``max(largest absolute gain, sigma)`` so the layer's
    scale stays controlled during training.
    """

    def __init__(self, name='weight', sigma=1.0, eps=1e-12):
        self.name = name
        self.sigma = sigma
        self.eps = eps

    def compute_weight(self, module):
        weight = getattr(module, self.name + '_orig')
        bias = getattr(module, "bias_orig")
        running_var = getattr(module, "running_var")
        with torch.no_grad():
            gain = torch.max(torch.abs(weight / torch.sqrt(running_var)))
            divisor = max(float(gain.cpu().detach().numpy()), self.sigma)
        return weight / divisor, bias / divisor

    def remove(self, module):
        weight = getattr(module, self.name)
        bias = getattr(module, "bias")
        for attr in (self.name, self.name + '_orig', "bias", "bias_orig"):
            delattr(module, attr)
        module.register_parameter(self.name, torch.nn.Parameter(weight.detach()))
        module.register_parameter("bias", torch.nn.Parameter(bias.detach()))

    def __call__(self, module, inputs):
        if module.training:
            weight, bias = self.compute_weight(module)
            setattr(module, self.name, weight)
            setattr(module, "bias", bias)
        else:
            # Eval mode: detach in place, preserving the requires_grad flags
            # of the original parameters.
            w_requires = getattr(module, self.name + '_orig').requires_grad
            b_requires = getattr(module, "bias_orig").requires_grad
            getattr(module, self.name).detach_().requires_grad_(w_requires)
            getattr(module, "bias").detach_().requires_grad_(b_requires)

    @staticmethod
    def apply(module, name, sigma, eps):
        fn = BatchNormSpectralNorm(name, sigma, eps)
        weight = module._parameters[name]
        bias = module._parameters["bias"]
        delattr(module, fn.name)
        delattr(module, "bias")
        module.register_parameter(fn.name + "_orig", weight)
        module.register_parameter("bias_orig", bias)
        # Keep `weight`/`bias` visible under their original names (as buffers
        # sharing storage) so state_dict and init code continue to work.
        module.register_buffer(fn.name, weight.data)
        module.register_buffer("bias", bias.data)
        module.register_forward_pre_hook(fn)
        return fn
def bn_spectral_norm(module, name='weight', sigma=1.0, eps=1e-12):
    """Attach a BatchNormSpectralNorm hook to `module` and return the module.

    Caps the effective per-channel gain of a BatchNorm layer at `sigma`; see
    https://arxiv.org/abs/1802.05957 for the spectral-normalization idea.

    Example::
        >>> m = bn_spectral_norm(nn.BatchNorm2d(10))
    """
    BatchNormSpectralNorm.apply(module, name, sigma, eps)
    return module
def remove_bn_spectral_norm(module, name='weight'):
    """Undo `bn_spectral_norm`, restoring ``<name>`` and ``bias`` as Parameters.

    Raises ValueError if no matching hook is registered on `module`.
    """
    hooks = module._forward_pre_hooks
    for key in list(hooks):
        hook = hooks[key]
        if isinstance(hook, BatchNormSpectralNorm) and hook.name == name:
            hook.remove(module)
            del hooks[key]
            return module
    raise ValueError("spectral_norm of '{}' not found in {}".format(
        name, module))
deep_equilibrium_inverse | deep_equilibrium_inverse-main/utils/forward_models_mri.py | import torch, numbers, math
import torch.nn as nn
import torch.nn.functional as torchfunc
import numpy as np
import cv2
import numpy as np
import torch
def to_tensor(data):
    """Convert a numpy array to a torch tensor.

    Complex arrays become real tensors with a trailing dimension of size 2
    holding (real, imag).
    """
    if not np.iscomplexobj(data):
        return torch.from_numpy(data)
    stacked = np.stack((data.real, data.imag), axis=-1)
    return torch.from_numpy(stacked)
def apply_mask(data, mask_func, seed=None, padding=None):
    """Subsample k-space `data` by multiplying with a generated mask.

    ``mask_func(shape, seed)`` produces the mask; all but the last three
    dimensions of the requested shape are collapsed to 1 so the mask
    broadcasts over batch dimensions. Optional ``padding=(lo, hi)`` zeroes
    the mask outside columns [lo, hi). Returns ``(masked_data, mask)``.
    """
    mask_shape = np.array(data.shape)
    mask_shape[:-3] = 1
    mask = mask_func(mask_shape, seed)
    if padding is not None:
        lo, hi = padding[0], padding[1]
        mask[:, :, :lo] = 0
        mask[:, :, hi:] = 0  # padding value inclusive on right of zeros
    # "+ 0.0" normalizes -0.0 entries produced by the multiplication.
    return data * mask + 0.0, mask
def mask_center(x, mask_from, mask_to):
    """Zero `x` everywhere except columns [mask_from, mask_to) of dim 3."""
    kept = x[:, :, :, mask_from:mask_to]
    out = torch.zeros_like(x)
    out[:, :, :, mask_from:mask_to] = kept
    return out
def complex_mul(x, y):
    """Elementwise complex multiply of (..., 2) real-imag tensors."""
    assert x.shape[-1] == y.shape[-1] == 2
    xr, xi = x[..., 0], x[..., 1]
    yr, yi = y[..., 0], y[..., 1]
    return torch.stack((xr * yr - xi * yi, xr * yi + xi * yr), dim=-1)
def complex_conj(x):
    """Complex conjugate of a (..., 2) real-imag tensor."""
    assert x.shape[-1] == 2
    real, imag = x[..., 0], x[..., 1]
    return torch.stack((real, -imag), dim=-1)
def fft2(data):
    """Centered 2D orthonormal FFT of a (..., H, W, 2) real-imag tensor."""
    if not data.shape[-1] == 2:
        raise ValueError("Tensor does not have separate complex dim.")
    shifted = ifftshift(data, dim=[-3, -2])
    cplx = torch.fft.fftn(torch.view_as_complex(shifted), dim=(-2, -1), norm="ortho")
    return fftshift(torch.view_as_real(cplx), dim=[-3, -2])
def dft_matrix(N, mask):
    # Build the partial DFT matrix containing only the frequency rows kept
    # by `mask`; result is len(kept) x N, scaled by 1/sqrt(N).
    # The row indices carry requires_grad so the kept frequencies could be
    # optimized downstream.
    learnable_parameters = torch.arange(0,N, dtype=torch.float32)
    learnable_parameters.requires_grad_(True)
    # Row 0 of the (fftshift-ed) mask is used as the column-selection
    # pattern -- presumably the mask is constant along rows; TODO confirm.
    mask_vec = fftshift(mask[0, :], dim=0)
    mask_vec = mask_vec > 0
    mask_vec = mask_vec.squeeze()
    masked_params = torch.masked_select(learnable_parameters, mask_vec)
    normalizer = np.sqrt(N)
    ii, jj = torch.meshgrid(masked_params, torch.arange(0,N, dtype=torch.float32))
    W = torch.exp(-2.0 * np.pi * 1j * ii*jj / N) / normalizer
    return W
def onedfft(data, dim):
    """Orthonormal 1D FFT of a (H, W, 2) real-imag tensor.

    Transforms every slice taken along `dim` along the other spatial axis:
    ``dim == 1`` transforms along axis 0, otherwise along axis 1.

    Bug fix: the previous per-slice loop FFT'd the *whole* complex view and
    assigned it into each real-valued slice (shape/dtype mismatch). This
    vectorized version performs the intended transform in one call and
    returns the result instead of mutating in place.
    """
    cplx = torch.view_as_complex(data.contiguous())
    fft_axis = 0 if dim == 1 else 1
    out = torch.fft.fftn(cplx, dim=fft_axis, norm="ortho")
    return torch.view_as_real(out)
def onedifft(data, dim):
    """Orthonormal 1D inverse FFT of a (H, W, 2) real-imag tensor.

    Inverse of :func:`onedfft`: ``dim == 1`` transforms along axis 0,
    otherwise along axis 1.

    Bug fix: the previous per-slice loop inverse-FFT'd the whole complex
    view and assigned it into each real-valued slice (shape/dtype mismatch).
    This vectorized version performs the intended transform in one call.
    """
    cplx = torch.view_as_complex(data.contiguous())
    fft_axis = 0 if dim == 1 else 1
    out = torch.fft.ifftn(cplx, dim=fft_axis, norm="ortho")
    return torch.view_as_real(out)
def ifft2(data):
    """Centered 2D orthonormal inverse FFT of a (..., H, W, 2) tensor."""
    if not data.shape[-1] == 2:
        raise ValueError("Tensor does not have separate complex dim.")
    shifted = ifftshift(data, dim=[-3, -2])
    cplx = torch.fft.ifftn(torch.view_as_complex(shifted), dim=(-2, -1), norm="ortho")
    return fftshift(torch.view_as_real(cplx), dim=[-3, -2])
def complex_abs(data):
    """Magnitude of a (..., 2) real-imag tensor."""
    assert data.size(-1) == 2
    return torch.sqrt((data ** 2).sum(dim=-1))
def complex_abs_sq(data):
    """Squared magnitude of a (..., 2) real-imag tensor."""
    assert data.size(-1) == 2
    squared = data * data
    return squared.sum(dim=-1)
def root_sum_of_squares(data, dim=0):
    """Root-sum-of-squares reduction of `data` along `dim`."""
    return (data ** 2).sum(dim).sqrt()
def root_sum_of_squares_complex(data, dim=0):
    """RSS reduction along `dim` for complex (..., 2) real-imag data."""
    return complex_abs_sq(data).sum(dim).sqrt()
def center_crop(data, shape):
    """Center-crop the last two dims of `data` to ``shape = (h, w)``."""
    h, w = shape[0], shape[1]
    assert 0 < h <= data.shape[-2]
    assert 0 < w <= data.shape[-1]
    top = (data.shape[-2] - h) // 2
    left = (data.shape[-1] - w) // 2
    return data[..., top:top + h, left:left + w]
def complex_center_crop(data, shape):
    """Center-crop dims -3/-2 of complex (..., H, W, 2) data to `shape`."""
    h, w = shape[0], shape[1]
    assert 0 < h <= data.shape[-3]
    assert 0 < w <= data.shape[-2]
    top = (data.shape[-3] - h) // 2
    left = (data.shape[-2] - w) // 2
    return data[..., top:top + h, left:left + w, :]
def center_crop_to_smallest(x, y):
    """Center-crop the larger of `x`, `y` so both share the smaller H and W."""
    height = min(x.shape[-2], y.shape[-2])
    width = min(x.shape[-1], y.shape[-1])
    return center_crop(x, (height, width)), center_crop(y, (height, width))
def normalize(data, mean, stddev, eps=0.):
    """Return ``(data - mean) / (stddev + eps)``."""
    denom = stddev + eps
    return (data - mean) / denom
def normalize_instance(data, eps=0.):
    """Standardize `data` by its own mean/std; returns (normed, mean, std)."""
    mu = data.mean()
    sigma = data.std()
    return normalize(data, mu, sigma, eps), mu, sigma
# Helper functions
def roll(x, shift, dim):
    """np.roll equivalent for tensors; `shift`/`dim` may be sequences.

    Idiom fix: delegates to the library op `torch.roll` instead of the
    hand-rolled narrow/cat implementation; results are identical (torch.roll
    handles zero, negative, and over-length shifts).
    """
    if isinstance(shift, (tuple, list)):
        assert len(shift) == len(dim)
        return torch.roll(x, tuple(shift), tuple(dim))
    return torch.roll(x, shift, dim)
def fftshift(x, dim=None):
    """torch analogue of np.fft.fftshift (shift by size // 2)."""
    if dim is None:
        dim = tuple(range(x.dim()))
        shift = [size // 2 for size in x.shape]
    elif isinstance(dim, int):
        shift = x.shape[dim] // 2
    else:
        shift = [x.shape[d] // 2 for d in dim]
    return roll(x, shift, dim)
def ifftshift(x, dim=None):
    """torch analogue of np.fft.ifftshift (shift by (size + 1) // 2)."""
    if dim is None:
        dim = tuple(range(x.dim()))
        shift = [(size + 1) // 2 for size in x.shape]
    elif isinstance(dim, int):
        shift = (x.shape[dim] + 1) // 2
    else:
        shift = [(x.shape[d] + 1) // 2 for d in dim]
    return roll(x, shift, dim)
class ApplyKSpaceMask(nn.Module):
    """Forward operator: FFT, apply a fixed k-space mask, inverse FFT."""

    def __init__(self, mask):
        super(ApplyKSpaceMask, self).__init__()
        self.mask = mask

    def forward(self, input):
        kspace = fft2(ifftshift(input))
        masked = kspace * self.mask + 0.0
        return fftshift(ifft2(masked))
def gaussian_oned(x):
    """Standard normal pdf evaluated at `x`."""
    coeff = 1.0 / np.sqrt(2.0 * np.pi)
    return coeff * np.exp(-0.5 * x ** 2)
def find_nearest(x, array):
    """Index of the element of `array` closest to scalar `x`."""
    return np.abs(array - x).argmin()
def exhaustive_sample(center_frac, acceleration, n_cols, seed):
    """Build a 1D sampling pattern over `n_cols` columns.

    The central ``center_frac`` band is always sampled; additional columns
    are drawn from a standard normal mapped onto a [-3, 3] grid until the
    fraction of sampled columns reaches ``1 / acceleration``. Returns a
    float array with nonzero entries at sampled columns.
    """
    grid = np.linspace(-3.0, 3.0, n_cols)
    sample_grid = np.zeros((n_cols,))
    num_low_freqs = int(round(n_cols * center_frac))
    pad = (n_cols - num_low_freqs + 1) // 2
    sample_grid[pad:pad + num_low_freqs] = [True] * num_low_freqs
    rng = np.random.RandomState(seed=seed)
    while True:
        draw = rng.standard_normal()
        if np.abs(draw) < 3.0:
            sample_grid[find_nearest(draw, grid)] = True
        if acceleration > n_cols / sum(sample_grid):
            return sample_grid
def create_mask(shape, center_fraction, acceleration, seed=0, flipaxis=False):
    """Build a subsampling mask for k-space of the given `shape`.

    The mask is 1D over the column axis (dim -2, or dim 0 when `flipaxis`
    is set) and shaped to broadcast over every other dimension. Returns a
    float32 tensor with ``requires_grad=False``.
    """
    num_cols = shape[-2]
    pattern = exhaustive_sample(center_fraction, acceleration, num_cols, seed)
    mask_shape = [1] * len(shape)
    if flipaxis:
        mask_shape[0] = num_cols
    else:
        mask_shape[-2] = num_cols
    pattern = pattern.reshape(*mask_shape).astype(np.float32)
    return torch.tensor(pattern, requires_grad=False)
class toKspace(nn.Module):
    """Maps an image batch (B, 2, H, W) to (optionally masked) k-space."""

    def __init__(self, mask=None):
        super(toKspace, self).__init__()
        if mask is None:
            self.mask = mask
        else:
            self.register_buffer('mask', tensor=mask)

    def forward(self, input):
        kspace = fft2(ifftshift(input.permute((0, 2, 3, 1))))
        if self.mask is not None:
            kspace = kspace * self.mask + 0.0
        return kspace.permute((0, 3, 1, 2))
def dft_matrix_perturbed(N, mask, center_frac=0.04):
    # Like dft_matrix, but jitters the kept non-center frequency rows by a
    # Uniform(-1, 1) perturbation; the central `center_frac` band is left
    # exact. Returns a len(kept) x N complex matrix scaled by 1/sqrt(N).
    learnable_parameters = torch.arange(0,N, dtype=torch.float32)
    final_center = int(N * center_frac / 2)
    learnable_parameters.requires_grad_(True)
    mask_vec = fftshift(mask[0, :], dim=0)
    mask_vec = mask_vec > 0
    mask_vec = mask_vec.squeeze()
    masked_params = torch.masked_select(learnable_parameters, mask_vec)
    normalizer = np.sqrt(N)
    # Fixed seed keeps the perturbation reproducible across calls.
    torch.random.manual_seed(20)
    masked_params[1 + final_center:-final_center] = masked_params[
        1 + final_center:-final_center] + 2 * torch.rand_like(
        masked_params[1 + final_center:-final_center]) - 1.0
    ii, jj = torch.meshgrid(masked_params, torch.arange(0,N, dtype=torch.float32))
    W = torch.exp(-2.0 * np.pi * 1j * ii*jj / N) / normalizer
    return W
class subsampledKspace(nn.Module):
    """Forward model mapping images to subsampled k-space.

    One image axis goes through a partial DFT built from the kept (optionally
    perturbed) frequency bins in `freqs`; the other axis gets a full
    orthonormal FFT. `freqs` is registered as a learnable parameter; the
    derived `dft_mat` is registered with ``requires_grad=False``.
    """

    def __init__(self, N, mask, center_frac=0.04, perturbed=False):
        super(subsampledKspace, self).__init__()
        bins = torch.arange(0, N, dtype=torch.float32)
        half_center = int(N * center_frac / 2)
        keep = fftshift(mask[0, :], dim=0) > 0
        keep = keep.squeeze()
        kept_bins = torch.masked_select(bins, keep)
        if perturbed:
            # Reproducibly jitter the non-center bins by Uniform(-1, 1).
            torch.manual_seed(110)
            mid = kept_bins[1 + half_center:-half_center]
            kept_bins[1 + half_center:-half_center] = mid + 2 * torch.rand_like(mid) - 1.0
        kept_bins.requires_grad_(True)
        self.register_parameter('freqs', param=torch.nn.Parameter(kept_bins))
        self.register_parameter('dft_mat', param=torch.nn.Parameter(
            self.dft_matrix_perturbed(N), requires_grad=False))

    def forward(self, input):
        """(B, 2, H, W) image -> shifted masked k-space (B, 2, H', W)."""
        shifted = ifftshift(input.permute((0, 2, 3, 1)))
        cplx = torch.view_as_complex(shifted)
        kspace = cplx @ torch.transpose(self.dft_mat, 0, 1)
        kspace = torch.fft.fftn(kspace, dim=1, norm="ortho")
        return fftshift(torch.view_as_real(kspace)).permute((0, 3, 1, 2))

    def gramian(self, input):
        """Apply A^H A in one pass: project onto kept rows and back."""
        shifted = ifftshift(input.permute((0, 2, 3, 1)))
        cplx = torch.view_as_complex(shifted)
        kspace = cplx @ torch.transpose(self.dft_mat, 0, 1)
        image = kspace @ torch.conj(self.dft_mat)
        return ifftshift(torch.view_as_real(image)).permute((0, 3, 1, 2))

    def adjoint(self, input):
        """Adjoint operator: inverse FFT then conjugate partial DFT."""
        shifted = ifftshift(input.permute((0, 2, 3, 1)))
        cplx = torch.view_as_complex(shifted)
        cplx = torch.fft.ifftn(cplx, dim=1, norm="ortho")
        image = cplx @ torch.conj(self.dft_mat)
        return ifftshift(torch.view_as_real(image)).permute((0, 3, 1, 2))

    def dft_matrix_perturbed(self, N):
        """(len(freqs) x N) DFT matrix with rows at self.freqs."""
        scale = np.sqrt(N)
        ii, jj = torch.meshgrid(self.freqs, torch.arange(0, N, dtype=torch.float32))
        return torch.exp(-2.0 * np.pi * 1j * ii * jj / N) / scale
class zeroFillin(nn.Module):
    """Forward model: centered 2D FFT followed by a fixed column mask.

    Unlike subsampledKspace no partial DFT matrix is used; unsampled k-space
    columns are simply zeroed ("zero filling").
    """

    def __init__(self, N, mask, center_frac=0.04, perturbed=False):
        """Store the column mask.

        `N`, `center_frac` and `perturbed` are accepted only for interface
        compatibility with subsampledKspace and are unused (the previous
        version computed several dead locals from them, removed here).
        """
        super(zeroFillin, self).__init__()
        # Broadcastable (1, 1, W) column mask. Previously hard-coded to
        # width 320; -1 generalizes to any mask length.
        self.mask = torch.reshape(mask, (1, 1, -1))

    def forward(self, input):
        """(B, 2, H, W) image -> masked k-space as (B, H, W, 2) real-imag."""
        shifted = ifftshift(input.permute((0, 2, 3, 1)))
        cplx = torch.view_as_complex(shifted)
        kspace = torch.fft.fftn(cplx, dim=1, norm="ortho")
        kspace = torch.fft.fftn(kspace, dim=2, norm="ortho")
        kspace = fftshift(kspace)
        # Mask in the shifted (centered) frame, then shift back.
        # (Previously a None mask left the result name unbound -> NameError.)
        if self.mask is not None:
            kspace = kspace * self.mask + 0.0
        kspace = ifftshift(kspace)
        return torch.view_as_real(kspace)

    def gramian(self, input):
        """A^T A: FFT, mask, inverse FFT; (B, 2, H, W) -> (B, 2, H, W)."""
        shifted = ifftshift(input.permute((0, 2, 3, 1)))
        cplx = torch.view_as_complex(shifted)
        kspace = torch.fft.fftn(cplx, dim=1, norm="ortho")
        kspace = torch.fft.fftn(kspace, dim=2, norm="ortho")
        kspace = fftshift(kspace)
        if self.mask is not None:
            kspace = kspace * self.mask + 0.0
        kspace = ifftshift(kspace)
        kspace = torch.fft.ifftn(kspace, dim=1, norm="ortho")
        image = torch.fft.ifftn(kspace, dim=2, norm="ortho")
        image = torch.view_as_real(image)
        return ifftshift(image).permute((0, 3, 1, 2))

    def adjoint(self, input):
        """Adjoint: inverse FFT of (B, H, W, 2) k-space -> (B, 2, H, W)."""
        cplx = torch.view_as_complex(input)
        cplx = torch.fft.ifftn(cplx, dim=1, norm="ortho")
        image = torch.fft.ifftn(cplx, dim=2, norm="ortho")
        image = torch.view_as_real(image)
        return ifftshift(image).permute((0, 3, 1, 2))

    def dft_matrix_perturbed(self, N):
        # NOTE(review): references self.freqs, which this class never sets --
        # calling this raises AttributeError. Kept only for interface parity
        # with subsampledKspace; confirm before use.
        normalizer = np.sqrt(N)
        ii, jj = torch.meshgrid(self.freqs, torch.arange(0, N, dtype=torch.float32))
        W = torch.exp(-2.0 * np.pi * 1j * ii * jj / N) / normalizer
        return W
class toKspaceMulti(nn.Module):
    """toKspace variant cycling through three masks.

    Call :meth:`advance_ii` to switch to the next mask (index wraps mod 3).
    """

    def __init__(self, masks):
        super(toKspaceMulti, self).__init__()
        self.masks = masks
        self.ii = 0

    def advance_ii(self):
        """Cycle the active mask index through 0, 1, 2."""
        self.ii = (self.ii + 1) % 3

    def forward(self, input):
        kspace = fft2(ifftshift(input.permute((0, 2, 3, 1))))
        kspace = kspace * self.masks[self.ii] + 0.0
        return kspace.permute((0, 3, 1, 2))
class fromKspace(nn.Module):
    """Maps (optionally masked) k-space (B, 2, H, W) back to image space."""

    def __init__(self, mask=None):
        super(fromKspace, self).__init__()
        if mask is None:
            self.mask = mask
        else:
            self.register_buffer('mask', tensor=mask)

    def forward(self, input):
        kspace = input.permute((0, 2, 3, 1))
        if self.mask is not None:
            kspace = kspace * self.mask + 0.0
        image = ifftshift(ifft2(kspace))
        return image.permute((0, 3, 1, 2))
| 21,838 | 33.446372 | 119 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/utils/spectral_norm_chen.py | """
Spectral Normalization borrowed from https://arxiv.org/abs/1802.05957
Real SN by convolution. Each layer has lipschtz constant of 1
"""
import torch
from torch.nn.functional import conv2d
from torch.nn.parameter import Parameter
def normalize(tensor, eps=1e-12):
    """Scale `tensor` to unit Frobenius norm.

    The divisor is clamped below by `eps`, so an all-zero tensor is returned
    unchanged instead of producing NaNs.
    """
    norm = float(torch.sqrt(torch.sum(tensor * tensor)))
    return tensor / max(norm, eps)
class SpectralNorm(object):
    """Convolutional spectral normalization (https://arxiv.org/abs/1802.05957).

    Instead of flattening the kernel to a matrix, the power iteration runs
    through the actual conv operator (and a flipped, extra-padded conv for
    its transpose), so the estimated sigma is the operator norm of the layer
    and each normalized layer has Lipschitz constant ~1.
    """

    def __init__(self, name='weight', n_power_iterations=1, dim=0, eps=1e-12):
        self.name = name
        self.dim = dim
        if n_power_iterations <= 0:
            raise ValueError('Expected n_power_iterations to be positive, but '
                             'got n_power_iterations={}'.format(n_power_iterations))
        self.n_power_iterations = n_power_iterations
        self.eps = eps

    def compute_weight(self, module):
        weight = getattr(module, self.name + '_orig')
        u = getattr(module, self.name + '_u')
        kernel = weight
        with torch.no_grad():
            for _ in range(self.n_power_iterations):
                # One power-iteration step: v ~ W^T u via a flipped conv with
                # padding=2 (cropped back by one pixel), then u ~ W v.
                v = normalize(conv2d(u.flip(2, 3), kernel.permute(1, 0, 2, 3), padding=2),
                              eps=self.eps).flip(2, 3)[:, :, 1:-1, 1:-1]
                u = normalize(conv2d(v, kernel, padding=1), eps=self.eps)
            if self.n_power_iterations > 0:
                # Clone so the buffers are not aliased by autograd history.
                u = u.clone()
                v = v.clone()
        # sigma = <u, W v> approximates the largest singular value.
        sigma = torch.sum(u * conv2d(v, kernel, padding=1))
        weight = weight / sigma
        return weight, u

    def remove(self, module):
        weight = getattr(module, self.name)
        delattr(module, self.name)
        delattr(module, self.name + '_u')
        delattr(module, self.name + '_orig')
        module.register_parameter(self.name, torch.nn.Parameter(weight.detach()))

    def __call__(self, module, inputs):
        if module.training:
            weight, u = self.compute_weight(module)
            setattr(module, self.name, weight)
            setattr(module, self.name + '_u', u)
        else:
            r_g = getattr(module, self.name + '_orig').requires_grad
            getattr(module, self.name).detach_().requires_grad_(r_g)

    @staticmethod
    def apply(module, name, n_power_iterations, dim, eps):
        fn = SpectralNorm(name, n_power_iterations, dim, eps)
        weight = module._parameters[name]
        height = weight.size(dim)
        # Channel count of the power-iteration activation u matches the
        # layer's output channels (2/3 for image-space layers, else the
        # default feature width of 64).
        if module.weight.shape[0] == 2:
            C_out = 2
        elif module.weight.shape[0] == 3:
            C_out = 3
        else:
            C_out = 64
        # 40x40 assumed input size -- TODO confirm training resolution.
        u = normalize(weight.new_empty(1, C_out, 40, 40).normal_(0, 1), eps=fn.eps)
        delattr(module, fn.name)
        module.register_parameter(fn.name + "_orig", weight)
        # Keep the weight visible under its original name (as a buffer
        # sharing storage) so init and state_dict keep working.
        module.register_buffer(fn.name, weight.data)
        module.register_buffer(fn.name + "_u", u)
        module.register_forward_pre_hook(fn)
        return fn
def spectral_norm(module, name='weight', n_power_iterations=1, eps=1e-12, dim=None):
    """Register a convolutional spectral-norm hook on `module` and return it.

    The hook rescales ``module.<name>`` before every forward call so its
    spectral norm (estimated by power iteration through the conv operator)
    is 1. `dim` defaults to 1 for transposed convolutions, 0 otherwise.
    See https://arxiv.org/abs/1802.05957.
    """
    if dim is None:
        transposed = isinstance(module, (torch.nn.ConvTranspose1d,
                                         torch.nn.ConvTranspose2d,
                                         torch.nn.ConvTranspose3d))
        dim = 1 if transposed else 0
    SpectralNorm.apply(module, name, n_power_iterations, dim, eps)
    return module
def remove_spectral_norm(module, name='weight'):
    """Undo `spectral_norm`, restoring ``<name>`` as a plain Parameter.

    Raises ValueError if no matching hook is registered on `module`.
    """
    hooks = module._forward_pre_hooks
    for key in list(hooks):
        hook = hooks[key]
        if isinstance(hook, SpectralNorm) and hook.name == name:
            hook.remove(module)
            del hooks[key]
            return module
    raise ValueError("spectral_norm of '{}' not found in {}".format(
        name, module))
| 7,791 | 43.272727 | 120 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/utils/bsd500.py | import torch
import h5py
import random
import numpy as np
import os
from PIL import Image
from torchvision import transforms
class Dataset(torch.utils.data.Dataset):
    """HDF5-backed image dataset scaled to [-1, 1].

    mode 'S' reads the fixed-path train/val h5 files; mode 'B' reads the
    'train_B.h5' / 'val_B.h5' variants. The h5 file is reopened on every
    access (h5py handles are not safe to share across dataloader workers).
    """

    def __init__(self, train=True, mode='S'):
        super(Dataset, self).__init__()
        self.train = train
        self.mode = mode
        self.data_loc = '/share/data/vision-greg2/users/gilton/train.h5'
        self.val_loc = '/share/data/vision-greg2/users/gilton/val.h5'
        h5f = self._open_h5()
        self.keys = list(h5f.keys())
        random.shuffle(self.keys)
        h5f.close()

    def _open_h5(self):
        """Open the h5 file for the current train/mode combination.

        Raises ValueError on an unknown mode. (Previously an unknown mode
        left the file handle unbound and crashed with NameError; the open
        logic was also duplicated across __init__ and __getitem__.)
        """
        if self.mode == 'S':
            path = self.data_loc if self.train else self.val_loc
        elif self.mode == 'B':
            path = 'train_B.h5' if self.train else 'val_B.h5'
        else:
            raise ValueError("unknown mode: {!r}".format(self.mode))
        return h5py.File(path, 'r')

    def __len__(self):
        return len(self.keys)

    def __getitem__(self, index):
        h5f = self._open_h5()
        key = self.keys[index]
        # Scale from [0, 1] to [-1, 1].
        data = 2 * np.array(h5f[key]) - 1
        h5f.close()
        return torch.Tensor(data)
def directory_filelist(target_directory):
    """Return the sorted names of non-hidden regular files in `target_directory`."""
    entries = sorted(os.listdir(target_directory))
    return [name for name in entries
            if os.path.isfile(os.path.join(target_directory, name))
            and not name.startswith('.')]
def load_img(file_name):
    """Open `file_name` and return it as a greyscale ("L" mode) PIL image."""
    with open(file_name, 'rb') as handle:
        # convert() forces the pixel data to load while the file is still open
        return Image.open(handle).convert("L")
class EquilibriumDataset(torch.utils.data.Dataset):
    """Paired dataset of target images and pre-computed initialisation images.

    Targets are greyscale images in `target_directory`; for each target, one of
    four initialisation variants ("<stem>_1.png" .. "<stem>_4.png") is drawn at
    random from `init_directory`. Both tensors are rescaled to [-1, 1] and get
    a zero second channel (real/imaginary layout).
    """
    def __init__(self, target_directory, init_directory, validation_data=False, transform=None):
        # NOTE(review): `validation_data` is accepted but never used — confirm intended.
        super(EquilibriumDataset, self).__init__()
        filelist = directory_filelist(target_directory)
        training_data = filelist
        # Paths are built by plain concatenation, so `target_directory` must
        # end with a path separator.
        self.full_filelist = [target_directory + single_file for single_file in training_data]
        self.init_directory = init_directory
        self.transform = transform
        # Suffixes of the four stored initialisation variants per target image.
        self.options = ['_1.png','_2.png','_3.png','_4.png']
    def __len__(self):
        return len(self.full_filelist)
    def convert_to_2d(self, x):
        # Append a zero channel along dim 0 (treats the image as the real part
        # of a 2-channel complex tensor).
        return torch.cat((x, torch.zeros_like(x)), dim=0)
    def __getitem__(self, item):
        """Return a (target, initial_point) tensor pair for index `item`."""
        image_name = self.full_filelist[item]
        # image_name = "/Users/dgilton/Documents/MATLAB/prDeep-master/train/test_001.png"
        data = load_img(image_name)
        if self.transform is not None:
            data = self.transform(data)
        # Rescale from [0, 1] (assumes a ToTensor-style transform — TODO confirm) to [-1, 1].
        data = 2.0*data - 1.0
        data = self.convert_to_2d(data)
        # Randomly pick one of the four stored initialisations for this target.
        random_choice = random.choice(self.options)
        initial_point_filename = os.path.splitext(os.path.split(image_name)[1])[0] + random_choice
        initial_point = load_img(self.init_directory + initial_point_filename)
        if self.transform is not None:
            initial_point = self.transform(initial_point)
        initial_point = 2.0 * initial_point - 1.0
        initial_point = self.convert_to_2d(initial_point)
        return data, initial_point
if __name__=="__main__":
    dataset_folder = "/Users/dgilton/PycharmProjects/provableplaying/training/data/train/"
    transform = transforms.Compose(
        [
            transforms.ToTensor(),
        ]
    )
    # Bug fix: EquilibriumDataset requires an `init_directory` holding the
    # "<stem>_k.png" initialisation images; the old call omitted it and raised
    # a TypeError. The data folder is reused here for the smoke test —
    # TODO confirm the real initialisation directory.
    dataset = EquilibriumDataset(dataset_folder, dataset_folder, transform=transform)
    # Bug fix: __getitem__ returns a (target, initial_point) pair, not a bare
    # tensor, so the old `dataset[0].shape` raised an AttributeError.
    data, initial_point = dataset[0]
    print(data.shape, initial_point.shape)
deep_equilibrium_inverse | deep_equilibrium_inverse-main/utils/cg_utils.py | import torch.nn as nn
import torch
def complex_conj(x):
    """Complex conjugate of a complex-as-channels tensor: dim 1 holds (real, imag)."""
    assert x.shape[1] == 2
    real_part = x[:, 0, ...]
    imag_part = x[:, 1, ...]
    return torch.stack((real_part, -imag_part), dim=1)
def torchdotproduct(x, y):
    """Per-sample (batched) dot product: sum of x*y over the non-batch dims 1-3."""
    return (x * y).sum(dim=[1, 2, 3])
def single_cg_iteration(x, d, g, b, ATA, regularization_lambda):
    """One conjugate-gradient step on (ATA + lambda*I) x = b.

    Returns the updated iterate, search direction and gradient (x, d, g).
    """
    def _dot(u, v):
        # batched inner product over the non-batch dims
        return torch.sum(u * v, dim=[1, 2, 3])

    def _apply_regularized(v):
        return ATA(v) + regularization_lambda * v

    Qd = _apply_regularized(d)
    dQd = _dot(d, Qd)
    step = (-_dot(g, d) / dQd).view((-1, 1, 1, 1))
    x_next = x + step * d
    g_next = _apply_regularized(x_next) - b
    beta = (_dot(g_next, Qd) / dQd).view((-1, 1, 1, 1))
    d_next = -g_next + beta * d
    return x_next, d_next, g_next
def conjugate_gradient(initial_point, ATA, regularization_lambda, n_iterations=10):
    """Solve (ATA + lambda*I) x = ATy by conjugate gradient, where
    `initial_point` is expected to hold ATy.

    Built from differentiable torch ops, so it can be backpropagated through.
    """
    x = torch.zeros_like(initial_point)
    d, g = initial_point, -initial_point
    for _ in range(n_iterations):
        x, d, g = single_cg_iteration(x, d, g, initial_point, ATA, regularization_lambda)
    return x
def complex_dotproduct(x, y):
    """Batched inner product <conj(x), y> for complex-as-channels tensors."""
    x_conj = complex_conj(x)
    return torchdotproduct(x_conj, y)
def single_cg_iteration_MRI(rTr, x, r, p, ATA, regularization_lambda):
    """One conjugate-gradient step for (ATA + lambda*I) x = b (MoDL-style).

    :param rTr: previous residual inner product <r, r>, one value per batch item
    :param x: current iterate
    :param r: current residual
    :param p: current search direction
    :param ATA: callable applying the normal operator
    :param regularization_lambda: Tikhonov weight added to the operator
    :return: updated (rTr, x, r, p)
    """
    batch_size = x.shape[0]
    def regATA(input):
        return ATA(input) + regularization_lambda*input
    Ap = regATA(p)
    rTr = rTr.view(batch_size, 1, 1, 1)
    alpha = rTr / complex_dotproduct(p, Ap).view(batch_size, 1, 1, 1)
    x_new = x + alpha * p
    r_new = r - alpha * Ap
    rTr_new = complex_dotproduct(r_new, r_new)
    rTr_new = rTr_new.view(batch_size, 1, 1, 1)
    beta = rTr_new / rTr
    # Bug fix: the next search direction must use the *updated* residual
    # (p_{k+1} = r_{k+1} + beta * p_k). The previous code used the stale `r`,
    # which breaks the conjugacy of the search directions and degrades
    # convergence of the CG solve.
    p_new = r_new + beta * p
    return rTr_new, x_new, r_new, p_new
def conjugate_gradient_MRI(initial_point, ATA, regularization_lambda, n_iterations=10):
    """Conjugate-gradient solve of (ATA + lambda*I) x = initial_point,
    following the structure of the MoDL reference implementation."""
    x = torch.zeros_like(initial_point)
    r = initial_point
    p = initial_point
    residual_norm = complex_dotproduct(r, r)
    for _ in range(n_iterations):
        residual_norm, x, r, p = single_cg_iteration_MRI(
            residual_norm, x, r, p, ATA, regularization_lambda)
    return x
deep_equilibrium_inverse | deep_equilibrium_inverse-main/pytorch_ssim/__init__.py | import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
def gaussian(window_size, sigma):
    """Return a 1-D Gaussian kernel of length `window_size`, normalised to sum to 1."""
    center = window_size // 2
    weights = [exp(-((i - center) ** 2) / float(2 * sigma ** 2))
               for i in range(window_size)]
    kernel = torch.Tensor(weights)
    return kernel / kernel.sum()
def create_window(window_size, channel):
    """Build a (channel, 1, ws, ws) separable Gaussian window (sigma=1.5)
    suitable for a grouped conv2d."""
    kernel_1d = gaussian(window_size, 1.5).unsqueeze(1)
    # Outer product turns the 1-D kernel into the 2-D window.
    kernel_2d = kernel_1d.mm(kernel_1d.t()).float().unsqueeze(0).unsqueeze(0)
    expanded = kernel_2d.expand(channel, 1, window_size, window_size).contiguous()
    return Variable(expanded)
def _ssim(img1, img2, window, window_size, channel, size_average = True):
mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1*mu2
sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
class SSIM(torch.nn.Module):
    """SSIM module that caches its Gaussian window between forward calls."""

    def __init__(self, window_size = 11, size_average = True):
        super(SSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        # Cached window; rebuilt lazily whenever the input channel count or
        # dtype/device type changes.
        self.channel = 1
        self.window = create_window(window_size, self.channel)

    def forward(self, img1, img2):
        _, channel, _, _ = img1.size()
        cache_hit = (channel == self.channel
                     and self.window.data.type() == img1.data.type())
        if cache_hit:
            window = self.window
        else:
            window = create_window(self.window_size, channel)
            if img1.is_cuda:
                window = window.cuda(img1.get_device())
            window = window.type_as(img1)
            # Refresh the cache for subsequent calls.
            self.window = window
            self.channel = channel
        return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
def ssim(img1, img2, window_size = 11, size_average = True):
    """Functional SSIM: builds a window matched to img1's channels/dtype and
    evaluates _ssim on the pair."""
    _, channel, _, _ = img1.size()
    window = create_window(window_size, channel)
    if img1.is_cuda:
        window = window.cuda(img1.get_device())
    window = window.type_as(img1)
    return _ssim(img1, img2, window, window_size, channel, size_average)
| 2,635 | 34.621622 | 104 | py |
federated-boosted-dp-trees | federated-boosted-dp-trees-master/federated_gbdt/models/base/jit_functions.py | import numba
import math
@numba.jit(nopython=True)
def _L1_clip(total_grads, reg_alpha):
    """
    Soft-threshold the gradient sum by reg_alpha (L1 regularisation):
    values inside [-reg_alpha, reg_alpha] collapse to 0, the rest are
    shrunk towards 0 by reg_alpha.
    :param total_grads: Sum of gradients
    :return: Thresholded gradient sum
    """
    if abs(total_grads) <= reg_alpha:
        return 0
    return total_grads - math.copysign(reg_alpha, total_grads)
@numba.jit(nopython=True)
def _calculate_weight(total_grads, total_hess, reg_alpha, reg_delta, reg_lambda):
    """
    Leaf weight: -clip(G) / (H + lambda), with the hessian sum floored at 0
    and the result optionally clamped to [-reg_delta, reg_delta].
    :param total_grads: Total sum of gradients
    :param total_hess: Total sum of hessians
    :return: Weight for leaf node
    """
    hess = total_hess if total_hess >= 0 else 0
    weight = -_L1_clip(total_grads, reg_alpha) / (hess + reg_lambda)
    if reg_delta != 0 and abs(weight) > reg_delta:
        return math.copysign(reg_delta, weight)  # Delta clipping
    return weight
@numba.jit(nopython=True)
def _calculate_gain(total_grads, total_hess, reg_alpha, reg_delta, reg_lambda):
    """
    Split gain from gradient/hessian sums. Without delta clipping this is
    clip(G)^2 / (H + lambda); when the implied weight exceeds reg_delta it
    falls back to the clipped-gain formula from the original XGBoost
    (src/tree/param.h).
    :param total_grads: Sum of gradients
    :param total_hess: Sum of hessians
    :return: Gain score
    """
    clipped = _L1_clip(total_grads, reg_alpha)
    denom = total_hess + reg_lambda
    weight = -clipped / denom
    if reg_delta == 0 or abs(weight) <= reg_delta:
        # G^2/H + lambda, with possible L1 regularisation on G
        return -weight * clipped
    weight = math.copysign(reg_delta, weight)  # Delta clipping
    # L1-regularised clipped gain calculation
    return -(2 * total_grads * weight + denom * weight ** 2) + reg_alpha * abs(weight)
| 1,972 | 34.872727 | 281 | py |
federated-boosted-dp-trees | federated-boosted-dp-trees-master/federated_gbdt/models/gbdt/private_gbdt.py | import math
import random
import sys
import pandas as pd
import numpy as np
from collections import Counter, defaultdict
from copy import copy
from fast_histogram import histogram1d
from federated_gbdt.models.base.tree_base import TreeBase
from federated_gbdt.models.base.tree_node import DecisionNode
from federated_gbdt.models.base.jit_functions import _calculate_gain, _calculate_weight, _L1_clip # numba funcs
from federated_gbdt.models.gbdt.components.split_candidate_manager import SplitCandidateManager
from federated_gbdt.models.gbdt.components.index_sampler import IndexSampler
from federated_gbdt.models.gbdt.components.privacy_accountant import PrivacyAccountant
from federated_gbdt.models.gbdt.components.train_monitor import TrainMonitor
from federated_gbdt.core.pure_ldp.frequency_oracles.hybrid_mechanism.hybrid_mech_client import HMClient
from federated_gbdt.core.loss_functions import SigmoidBinaryCrossEntropyLoss, BinaryRFLoss, SoftmaxCrossEntropyLoss
from sklearn.preprocessing import LabelBinarizer
class PrivateGBDT(TreeBase):
    def __init__(self, num_trees=2, max_depth=6, # Default tree params
                 task_type="classification", loss=SigmoidBinaryCrossEntropyLoss(),
                 reg_lambda=1, reg_alpha=0, reg_gamma=1e-7, reg_eta=0.3, reg_delta=2, # Regularisation params
                 min_samples_split=2, min_child_weight=0, # Regularisation params
                 subsample=1, row_sample_method=None, colsample_bytree=1, colsample_bylevel=1, colsample_bynode=1, # Sampling params
                 sketch_type="uniform", sketch_eps=0.001, sketch_rounds=float("inf"), bin_type="all", range_multiplier=1, hist_bin=32, categorical_map=None, # Sketch params
                 dp_method="", accounting_method="rdp_scaled_improved", epsilon=1, quantile_epsilon=0, gradient_clipping=False, # DP params
                 tree_budgets=None, level_budgets=None, gradient_budgets=None, # DP params
                 ignore_split_constraints=False, grad_clip_const=None, # DP params
                 split_method="hist_based", weight_update_method="xgboost", training_method="boosting", batched_update_size=1, # training method params
                 feature_interaction_method="", feature_interaction_k=None, full_ebm=False, # feature interaction params
                 early_stopping=None, es_metric=None, es_threshold=-5, es_window=3, # early stopping
                 track_budget=True, split_method_per_level=None, hist_estimator_method=None, sigma=None, verbose=False, output_train_monitor=False):
        """Configure a (differentially private) gradient-boosted decision tree ensemble.

        Wires together the helper components (IndexSampler for row/column
        sampling, SplitCandidateManager for split-candidate sketches,
        PrivacyAccountant for DP budget tracking, TrainMonitor for
        timing/communication stats) from the keyword configuration. No
        training happens here; call fit(X, y).

        NOTE(review): `loss=SigmoidBinaryCrossEntropyLoss()` is a default
        argument evaluated once at class definition, so the same loss object
        is shared by every instance that does not pass `loss` — confirm the
        loss objects are stateless.
        """
        super(PrivateGBDT, self).__init__(min_samples_split=min_samples_split, max_depth=max_depth, task_type=task_type)
        self.output_train_monitor = output_train_monitor
        # Training type
        self.training_method = training_method
        self.loss = loss
        if self.training_method == "rf":
            self.loss = BinaryRFLoss()
        self.batched_update_size = batched_update_size
        self.weight_update_method = weight_update_method # xgboost vs gbm updates
        self.split_method = split_method # Determines how splits are chosen - hist_based, partially_random, totally_random, hybrid_random
        self.split_method_per_level = split_method_per_level
        self.feature_interaction_method = feature_interaction_method
        self.feature_interaction_k = feature_interaction_k
        self.full_ebm = full_ebm
        # Non-hybrid split methods apply the same strategy at every depth.
        if self.split_method in ["hist_based", "partially_random", "totally_random", "node_based"]:
            self.split_method_per_level = [self.split_method]*self.max_depth
        if self.split_method == "hybrid_random" and self.split_method_per_level is None:
            self.split_method_per_level = ["totally_random"] * self.max_depth # By default just do totally random
        self.hist_estimator_method = hist_estimator_method # one_sided, two_sided, two_sided_averaging
        # Base Parameters
        self.num_trees = num_trees
        self.feature_list = None
        self.num_features = None
        self.X = None
        self.ignore_split_constraints = ignore_split_constraints
        self.feature_bin = []
        self.gradient_histogram, self.hessian_histogram, self.root_hessian_histogram = [], [], []
        # Tracking vars
        self.train_monitor = TrainMonitor(0)
        # Regularisation Parameters
        self.reg_lambda = reg_lambda # L2 regularisation on weights
        self.reg_alpha = reg_alpha # L1 regularisation on gradients
        self.reg_gamma = reg_gamma # Equivalent to the min impurity score needed to split a node further (or just leave it as a leaf)
        self.min_child_weight = min_child_weight # Minimum sum of instance weight (hessian) needed in a child, if the sum of hessians less than this then the node is not split further
        self.min_samples_split = min_samples_split
        self.reg_eta = reg_eta # Learning rate - multiplicative factor on weights
        self.reg_delta = reg_delta # Clipping on the weights -> Useful in imablanced scenarios where it's possible for the hess to be 0 and thus the weights arbitraily large
        # Random Sampling Parameters
        self.index_sampler = IndexSampler(subsample, row_sample_method, colsample_bytree, colsample_bylevel, colsample_bynode)
        # Binning / Sketching Parameters for Feature Splits
        self.split_candidate_manager = SplitCandidateManager(hist_bin, self.num_trees, quantile_epsilon,
                                                             sketch_type, sketch_rounds, categorical_map,
                                                             sketch_eps, bin_type, range_multiplier)
        # Privacy (DP) Parameters
        self.dp_method = dp_method
        self.track_budget = track_budget
        self.verbose = verbose
        # The delta value of 1e-5 is a placeholder that is updated to 1/n when the dataset is being trained
        self.privacy_accountant = PrivacyAccountant(accounting_method, epsilon, 1e-5, quantile_epsilon, dp_method,
                                                    self.num_trees, self.max_depth, self.split_method, self.training_method, self.weight_update_method,
                                                    split_method_per_level=self.split_method_per_level,
                                                    tree_budgets=tree_budgets, gradient_budgets=gradient_budgets, level_budgets=level_budgets,
                                                    feature_interaction_method=self.feature_interaction_method, feature_interaction_k=self.feature_interaction_k,
                                                    sample_method=self.index_sampler.row_sample_method, subsample=self.index_sampler.subsample,
                                                    sketch_type=self.split_candidate_manager.sketch_type, sketch_rounds=self.split_candidate_manager.sketch_rounds,
                                                    task_type=self.task_type, sigma=sigma, grad_clip_const=grad_clip_const, gradient_clipping=gradient_clipping,
                                                    verbose=self.verbose)
        # Early stopping (not used)
        self.early_stopping = early_stopping
        self.es_metric = "root_hess" if es_metric is None else es_metric
        self.es_window = es_window
        self.es_threshold = es_threshold
    def _reset_accountant(self):
        """Re-create the PrivacyAccountant from the current model/accountant
        configuration. Used by fit() when retraining after early stopping."""
        self.privacy_accountant = PrivacyAccountant(self.privacy_accountant.accounting_method, self.privacy_accountant.epsilon, 1e-5,
                                                    self.privacy_accountant.quantile_epsilon, self.dp_method,
                                                    self.num_trees, self.max_depth, self.split_method, self.training_method, self.weight_update_method,
                                                    split_method_per_level=self.split_method_per_level,
                                                    tree_budgets=self.privacy_accountant.tree_budgets, gradient_budgets=self.privacy_accountant.gradient_budgets, level_budgets=self.privacy_accountant.level_budgets,
                                                    feature_interaction_method=self.feature_interaction_method, feature_interaction_k=self.feature_interaction_k,
                                                    sample_method = self.index_sampler.row_sample_method, subsample=self.index_sampler.subsample,
                                                    sketch_type=self.split_candidate_manager.sketch_type, sketch_rounds=self.split_candidate_manager.sketch_rounds,
                                                    task_type=self.task_type, sigma=self.privacy_accountant.sigma,
                                                    grad_clip_const=self.privacy_accountant.grad_clip_const, gradient_clipping=self.privacy_accountant.gradient_clipping,
                                                    verbose=self.verbose,)
    def _reset_tracking_attributes(self, checkpoint):
        """Drop references to the training data and (optionally) tracking state
        to free memory.

        :param checkpoint: If True, keep per-round weight/gradient trackers and
            the accountant; if False, clear them as well.
        """
        self.X, self.y = None, None

        # These dont need to be removed but save space...
        if not checkpoint:
            self.train_monitor.current_tree_weights = []
            self.train_monitor.previous_tree_weights = []
            self.train_monitor.y_weights = []
            self.train_monitor.leaf_gradient_tracker = [[], []]
            self.train_monitor.root_gradient_tracker = [[], []]
            self.train_monitor.gradient_info = []
            self.privacy_accountant = None
            self.gradient_histogram = []
            self.feature_bin = []
# ---------------------------------------------------------------------------------------------------
def _compute_grad_hessian_with_samples(self, y, y_pred):
"""
Called at the start of every tree, computes gradients and hessians for every observation from the previous predictions of the ensemble
If using a LDP method, the perturbation is done here and the tree is formed as a post-processing step on the LDP perturbed gradients
Otherwise, the raw gradients are passed to the model from fit() to _build_tree() and they are perturbed later on in _add_dp_noise()
:param y: True labels
:param y_pred: Predicted labels
:return: List of gradients and hessians
"""
if self.task_type == 'classification' or self.task_type == "regression":
grads = self.loss.compute_grad(y, y_pred)
if self.task_type == "regression":
grads = np.clip(grads, self.privacy_accountant.min_gradient, self.privacy_accountant.max_gradient)
if self.weight_update_method == "xgboost":
hess = self.loss.compute_hess(y, y_pred)
else:
hess = np.ones(len(y))
if self.dp_method == "mean_mech_ldp":
# Use mean mechanism perturbation
hess_hm_client = HMClient(self.privacy_accountant.tree_budgets[len(self.trees)]*self.epsilon, self.privacy_accountant.max_hess, self.privacy_accountant.min_hess) # Hess perturber
grad_hm_client = HMClient(self.privacy_accountant.tree_budgets[len(self.trees)]*self.epsilon, self.privacy_accountant.max_gradient, self.privacy_accountant.min_gradient) # Grad perturber
grads = np.array([grad_hm_client.privatise(g) for g in grads])
hess = np.array([hess_hm_client.privatise(h) for h in hess])
elif self.dp_method == "gaussian_ldp":
# Gaussian LDP
grad_sigma = self.privacy_accountant.gaussian_var(gradient_type="gradient", depth=self.max_depth-1)
hess_sigma = self.privacy_accountant.gaussian_var(gradient_type="hessian", depth=self.max_depth-1)
if self.split_method == "hist_based":
grad_sigma /= math.sqrt(self.num_features * self.max_depth)
hess_sigma /= math.sqrt(self.num_features * self.max_depth)
gradient_noise = np.random.normal(0, grad_sigma, size=(len(grads)))
hess_noise = np.random.normal(0, grad_sigma, size=(len(hess)))
grads = grads + gradient_noise
hess = hess + hess_noise
else:
raise TypeError('%s task is not included in our XGboost algorithm !' % self.task_type)
return grads, hess
# Following methods assume that the total grads/hess that are passed have already been perturbed under some DP scheme
def _L1_clip(self, total_grads):
"""
L1 regularisation on the gradients, controlled by self.reg_alpha
:param total_grads:
:return:
"""
return _L1_clip(total_grads, self.reg_alpha)
# if total_grads > self.reg_alpha:
# return total_grads - self.reg_alpha
# elif total_grads < -1 * self.reg_alpha:
# return total_grads + self.reg_alpha
# else:
# return 0
def _calculate_weight(self, total_grads, total_hess):
"""
Calculates weight for leaf nodes
:param total_grads: Total sum of gradients
:param total_hess: Total sum of hessians
:return: Weight for leaf node
"""
# if total_hess < self.min_hess:
# total_hess = 0
return _calculate_weight(total_grads, total_hess, self.reg_alpha, self.reg_delta, self.reg_lambda)
# weight = -1 * (self._L1_clip(total_grads) / (total_hess + self.reg_lambda))
# if self.reg_delta != 0 and abs(weight) > self.reg_delta:
# return math.copysign(self.reg_delta, weight) # Delta clipping
# else:
# return weight
def _calculate_gain(self, total_grads, total_hess):
"""
Calculates gain from sum of gradients and sum of hessians
:param total_grads: Sum of gradients
:param total_hess: Sum of hessians
:return: Gain score
"""
return _calculate_gain(total_grads, total_hess, self.reg_alpha, self.reg_delta, self.reg_lambda)
# weight = self._calculate_weight(total_grads, total_hess)
# if self.reg_delta == 0:
# return -0.5 * weight * self._L1_clip(total_grads) # G^2/H + lambda, with possible L1 regularisation and delta clipping on G
# else: # If delta-clipping is enabled the gain calculation is a little more complicated, following the implementation in the original XGBoost: https://github.com/dmlc/xgboost/blob/d7d1b6e3a6e2aa8fcb1857bf5e3188302a03b399/src/tree/param.h
# return -(2 * total_grads * weight + (total_hess + self.reg_lambda) * weight ** 2) + self.reg_alpha * abs(weight) # This is an L1-regularised clipped gain calculation
def _calculate_split_score(self, left_gain, right_gain, total_gain):
return 0.5 * (left_gain + right_gain - total_gain)
    def _calculate_leaf_weight(self, total_grads, total_hess):
        """
        Calculates weight for leaf nodes, with optional learning rate specified by self.reg_eta
        :param total_grads: Sum of gradients (array of per-class sums in the multiclass case)
        :param total_hess: Sum of hessians (array of per-class sums in the multiclass case)
        :return: Leaf weight
        """
        # reg_alpha == 0 disables the L1 clip by making the clip range infinite.
        if self.reg_alpha == 0:
            reg_alpha = float("inf")
        else:
            reg_alpha = self.reg_alpha

        if self.num_classes > 2:
            # Vectorised multiclass path: floor hessians at 0, L1-clip gradients,
            # then delta-clip the resulting per-class weights.
            total_hess = np.clip(total_hess, 0, float("inf"))
            weight = -1 * (np.clip(total_grads, -reg_alpha, reg_alpha) / (total_hess + self.reg_lambda))
            if self.reg_delta != 0:
                clip_idx = np.abs(weight) > self.reg_delta
                weight[clip_idx] = np.copysign(self.reg_delta, weight[clip_idx])
            # NOTE(review): reg_eta (learning rate) is not applied in this
            # branch, unlike the binary case below — confirm intended.
            return weight
        else:
            return self._calculate_weight(total_grads, total_hess) * self.reg_eta # Multiply the weight by the learning rate for leaf values
# Main training logic
# ---------------------------------------------------------------------------------------------------
# Public method to train the model
    def fit(self, X, y):
        """
        Main training loop
        :param X: Training data as a pandas dataframe/ numpy array
        :param y: Training labels
        :return: self (trained GBDT model)
        """
        X = self._convert_df(X)
        self.num_features = X.shape[1]
        self.feature_list = range(0, self.num_features)
        self.train_monitor.update_num_clients(X.shape[0])

        # Calculate split candidates
        self.train_monitor.start_timing_event("server", "initial split candidates")
        self.split_candidate_manager.find_split_candidates(X, 0, None, features_considering=self.feature_list)
        self.train_monitor.end_timing_event("server", "initial split candidates")

        # TODO: Track comm (split candidates)
        self.train_monitor.update_received(range(0, X.shape[0]), 8*len(self.split_candidate_manager.feature_split_candidates)*len(self.split_candidate_manager.feature_split_candidates[0]))
        self.privacy_accountant.update_feature_candidate_size(self.split_candidate_manager.feature_split_candidates)
        self.X = X
        self.train_monitor.batched_weights = np.zeros(self.X.shape[0])

        # EBM-style cyclical training over features may expand the ensemble
        # size and requires the privacy budget to be re-allocated.
        if "cyclical" in self.feature_interaction_method and (self.split_candidate_manager.sketch_type == "adaptive_hessian" or self.full_ebm):
            if self.full_ebm:
                self.num_trees = self.num_trees * X.shape[1]
            self.split_candidate_manager.sketch_rounds = min(self.num_trees, self.split_candidate_manager.sketch_rounds*self.num_features)
            # recompute budget allocation
            self.train_monitor.start_timing_event("server", "privacy_accountant initialisation")
            self.privacy_accountant.__init__(self.privacy_accountant.accounting_method, epsilon=self.privacy_accountant.epsilon, delta=self.privacy_accountant.delta,
                                             quantile_epsilon=self.privacy_accountant.quantile_epsilon, dp_method=self.dp_method,
                                             num_trees=self.num_trees, max_depth=self.max_depth, split_method=self.split_method, training_method=self.training_method, weight_update_method=self.weight_update_method,
                                             split_method_per_level=self.split_method_per_level,
                                             feature_interaction_method=self.feature_interaction_method, feature_interaction_k=self.feature_interaction_k,
                                             sample_method = self.index_sampler.row_sample_method, subsample=self.index_sampler.subsample,
                                             sketch_type=self.split_candidate_manager.sketch_type, sketch_rounds=self.split_candidate_manager.sketch_rounds,
                                             task_type=self.task_type, sigma=self.privacy_accountant.sigma,
                                             grad_clip_const=self.privacy_accountant.grad_clip_const, gradient_clipping=self.privacy_accountant.gradient_clipping,
                                             verbose=self.verbose,)
            self.train_monitor.end_timing_event("server", "privacy_accountant initialisation")

        if self.batched_update_size < 1:
            self.batched_update_size = int(self.batched_update_size * self.num_trees)

        y = y if not isinstance(y, pd.Series) else y.values
        self.num_classes = len(np.unique(y))
        self.train_monitor.set_num_classes(self.num_classes)

        self.train_monitor.start_timing_event("server", "initialise model weights")
        if self.num_classes > 2:
            # Multiclass: one-hot labels and uniform initial class probabilities.
            self.loss = SoftmaxCrossEntropyLoss()
            y = LabelBinarizer().fit_transform(y)
            self.train_monitor.y_weights = np.full((X.shape[0], self.num_classes), 1/self.num_classes,)
            self.train_monitor.current_tree_weights = np.zeros((X.shape[0], self.num_classes))
        else:
            self.train_monitor.y_weights = np.zeros(X.shape[0]) # Initialise training weights to zero which is sigmoid(0) = 0.5 prob to either class
            self.train_monitor.current_tree_weights = np.zeros(X.shape[0])

        self.feature_weights = [1/self.num_features]*self.num_features

        # Initialise Gaussian DP parameters
        if "gaussian" in self.dp_method:
            self.privacy_accountant.assign_budget(self.privacy_accountant.epsilon, 1 / X.shape[0], num_rows=X.shape[0], num_features=X.shape[1]) # Update delta to 1/n
        self.train_monitor.end_timing_event("server", "initialise model weights")

        # Form histogram bin assignments for each feature - this caching saves a lot of time for histogram based gradient aggregation later on
        self.train_monitor.start_timing_event("client", "histogram building")
        for i in range(0, self.num_features):
            self.feature_bin.append(np.digitize(self.X[:, i], bins=[-np.inf] + list(np.array(self.split_candidate_manager.feature_split_candidates[i]) + 1e-11) + [np.inf]))
        self.train_monitor.end_timing_event("client", "histogram building")
        self.feature_bin = np.array(self.feature_bin)

        features = np.array(range(0, self.num_features))
        previous_rounds_features = None

        # --- Per-tree training loop ---
        for i in range(0, self.num_trees):
            self.train_monitor.node_count = -1 # Reset node count for new trees

            # Optionally re-sketch split candidates each tree (e.g. adaptive hessian sketches).
            if self.split_candidate_manager.sketch_each_tree:
                if self.split_candidate_manager.sketch_type == "adaptive_hessian" and len(self.trees) >= self.split_candidate_manager.sketch_rounds:
                    pass
                else:
                    features_updated = previous_rounds_features if previous_rounds_features is not None else list(range(0, self.num_features))
                    self.train_monitor.start_timing_event("server", f"split_candidates")
                    self.split_candidate_manager.find_split_candidates(X, len(self.trees), self.root_hessian_histogram, features_considering=features_updated)
                    self.train_monitor.end_timing_event("server", f"split_candidates")
                    self.train_monitor.start_timing_event("client", "histogram building")
                    for j in features_updated:
                        self.feature_bin[j] = np.digitize(self.X[:, j], bins=[-np.inf] + list(np.array(self.split_candidate_manager.feature_split_candidates[j]) + 1e-11) + [np.inf])
                    self.train_monitor.end_timing_event("client", "histogram building")
                    # TODO: Track here for communication (split candidates)
                    self.train_monitor.update_received(range(0, X.shape[0]), 8*len(features_updated)*len(self.split_candidate_manager.feature_split_candidates[0]))

            self.train_monitor.start_timing_event("server", "pre-tree ops")
            # Row and Feature Sampling if enabled
            row_sample, col_tree_sample, col_level_sample = self.index_sampler.sample(i, X.shape[0], X.shape[1], self.max_depth, feature_interaction_k=self.feature_interaction_k, feature_interaction_method=self.feature_interaction_method)
            previous_rounds_features = col_tree_sample
            split_constraints = {i : [0, len(self.split_candidate_manager.feature_split_candidates[i])+1] for i in range(0,self.num_features)}

            if i != 0:
                self.privacy_accountant.update_tree() # Increment tree count in privacy_accountant, used to index tree_budgets
            self.train_monitor.end_timing_event("server", "pre-tree ops")

            # Gradients are (re)computed at the start of every boosting round;
            # with batched_boosting only every batched_update_size rounds.
            if i==0 or self.training_method == "boosting" or (self.training_method == "batched_boosting" and (i % self.batched_update_size == 0)):
                if self.training_method == "batched_boosting":
                    self.train_monitor.y_weights += self.train_monitor.batched_weights / self.batched_update_size
                    self.train_monitor.batched_weights = np.zeros(self.X.shape[0])
                    # TODO: Track communication (batched updates)
                    if i != 0:
                        self.train_monitor.update_sent(range(0, self.X.shape[0]), 8*2*self.batched_update_size*self.train_monitor.leaf_count_tracker[-1])

                self.train_monitor.start_timing_event("client", f"computing gradients")
                grads, hess = self._compute_grad_hessian_with_samples(y, self.loss.predict(self.train_monitor.y_weights)) # Compute raw grads,hess
                self.train_monitor.end_timing_event("client", f"computing gradients")
                self.train_monitor.gradient_info = [(grads, hess)] # Append to gradient_info, at each node this is retrieved and privatised with DP to calculate feature scores etc

            tree = self._build_tree(features, row_sample, None, None,
                                    split_constraints=split_constraints, col_tree_sample=col_tree_sample, col_level_sample=col_level_sample, row_ids=np.arange(0,X.shape[0]))
            self.trees.append(tree) # Build and add tree to ensemble

            self.train_monitor.start_timing_event("server", "post-tree ops")
            if self.training_method == "batched_boosting":
                self.train_monitor.batched_weights += self.train_monitor.current_tree_weights
                if i==self.num_trees-1 and (i+1) % self.batched_update_size != 0:
                    # TODO: Track communication (batched updates)
                    self.train_monitor.update_sent(range(0, self.X.shape[0]), 8*2*self.batched_update_size*self.train_monitor.leaf_count_tracker[-1])
                    self.train_monitor.y_weights += self.train_monitor.batched_weights / ((i+1) % self.batched_update_size)
                elif i==self.num_trees-1:
                    # TODO: Track communication (batched updates)
                    self.train_monitor.update_sent(range(0, self.X.shape[0]), 8*2*self.batched_update_size*self.train_monitor.leaf_count_tracker[-1])
            else:
                self.train_monitor.y_weights += self.train_monitor.current_tree_weights # Update weights

            self.train_monitor.leaf_gradient_tracker[0].append(self.train_monitor.gradient_total[0])
            self.train_monitor.leaf_gradient_tracker[1].append(self.train_monitor.gradient_total[1])

            # --- Early stopping: compare a trailing window of the chosen
            # gradient/hessian statistic against the preceding window ---
            threshold_change = self.es_threshold
            window = self.es_window
            if len(self.trees) >= 2*self.es_window and self.early_stopping:
                if self.es_metric == "leaf_hess":
                    es_metric = self.train_monitor.leaf_gradient_tracker[1]
                elif self.es_metric == "leaf_grad":
                    es_metric = self.train_monitor.leaf_gradient_tracker[0]
                elif self.es_metric == "root_grad":
                    es_metric = self.train_monitor.root_gradient_tracker[0]
                else:
                    es_metric = self.train_monitor.root_gradient_tracker[1]

                current_window_hess = abs(np.mean(es_metric[-window:])) if "grad" in self.es_metric else np.mean(es_metric[-window:])
                previous_window_hess = abs(np.mean(es_metric[-2*window:-window])) if "grad" in self.es_metric else np.mean(es_metric[-2*window:-window])
                per_change = (previous_window_hess/current_window_hess-1)*100

                if ("standard" in self.early_stopping or self.early_stopping == "rollback" or "average" in self.early_stopping) and per_change < threshold_change:
                    print("Early Stopping at round", i+1)
                    if self.early_stopping == "rollback":
                        self.trees = self.trees[:-1]
                        self.train_monitor.y_weights -= self.train_monitor.current_tree_weights
                    break
                elif self.early_stopping == "retry" and per_change < threshold_change: #es_metric[-2] - es_metric[-1] < 0
                    # "retry": discard the most recent tree(s) and their tracked state.
                    prune_step = -2 if "root" in self.es_metric else -1
                    self.trees = self.trees[:prune_step]
                    self.train_monitor.gradient_info = self.train_monitor.gradient_info[:prune_step]
                    self.train_monitor.root_gradient_tracker[0] = self.train_monitor.root_gradient_tracker[0][:prune_step]
                    self.train_monitor.root_gradient_tracker[1] = self.train_monitor.root_gradient_tracker[1][:prune_step]
                    self.train_monitor.leaf_gradient_tracker[0] = self.train_monitor.leaf_gradient_tracker[0][:prune_step]
                    self.train_monitor.leaf_gradient_tracker[1] = self.train_monitor.leaf_gradient_tracker[1][:prune_step]
                    self.train_monitor.y_weights -= self.train_monitor.current_tree_weights + (prune_step+1)*-1*self.train_monitor.previous_tree_weights # If root then remove 2 trees, if leaf remove 1
            self.train_monitor.end_timing_event("server", "post-tree ops")

            # Reset tracking vars
            self.train_monitor._update_comm_stats(self.split_method, self.training_method)
            self.train_monitor.reset()

        # --- "retrain" early stopping: recycle leftover privacy budget by
        # resetting state and refitting with the reduced tree count ---
        if self.early_stopping and "retrain" in self.early_stopping and len(self.trees) < self.num_trees:
            # Calculate new budget
            # NOTE(review): self.epsilon is read here but is not assigned in
            # __init__ — confirm it is set elsewhere (e.g. by the base class).
            old_eps = self.epsilon
            self.epsilon = self.epsilon - self.privacy_accountant._autodp_check(self.privacy_accountant.sigma_arr, len(self.trees))
            if "majority" in self.early_stopping and self.epsilon >= old_eps:
                print("Budget leftover is not enough to train model... exiting with early stopped model")
                return self
            else:
                # Reset parameters
                old_trees = []
                if "average" in self.early_stopping:
                    old_trees = self.trees
                es_method = self.early_stopping
                self._reset_tracking_attributes(False)
                self._reset_accountant()
                self.num_trees = len(self.trees)
                self.trees = []
                self.privacy_accountant.assign_budget(self.epsilon, 1/X.shape[0], X.shape[0], X.shape[1])
                self.early_stopping = None
                self.fit(X,y) # Retrain model
                self.trees = old_trees + self.trees
                self.early_stopping = es_method
                return self

        self.root = self.trees[0]

        if self.training_method == "rf":
            # Random forests average rather than sum the per-tree weights.
            self.train_monitor.y_weights /= len(self.trees)

        if self.verbose:
            print("Number of trees trained", len(self.trees))

        if self.dp_method != "":
            # Track budget spent by participants - for debugging
            scale_factor = 2 if self.privacy_accountant.gradient_method == "vector_mechanism" else 1
            print("\n[Ledger] Average number of queries done by a participant :", np.mean(self.privacy_accountant.ledger)/scale_factor)
            print("[Ledger] Minimum queries done by a participant:", np.min(self.privacy_accountant.ledger)/scale_factor)
            print("[Ledger] Maximum queries done by a participant:", np.max(self.privacy_accountant.ledger)/scale_factor, "\n")

        self.y_weights = self.train_monitor.y_weights

        if self.output_train_monitor:
            print(f"Size of dataset n,m={self.X.shape}")
            self.train_monitor.output_summary()

        return self
def _calculate_feature_split(self, features_considering, split_index, current_depth, total_gain, total_grads, total_hess, grads, hess, feature_split_constraints):
    """
    Calculates split scores for the candidate features and all of their split candidate
    values, returning the best (feature, split) found under this level's split method.

    :param features_considering: Iterable of feature indices eligible at this node
    :param split_index: Index (into self.X) of the observations in the current node
    :param current_depth: Current depth in the tree
    :param total_gain: Total gain of the node
    :param total_grads: Total gradient sum of the node
    :param total_hess: Total hessian sum of the node
    :param grads: Per-observation gradients for the node
    :param hess: Per-observation hessians for the node
    :param feature_split_constraints: Per-feature [lower, upper] bounds on usable split-candidate indices
    :return: [feature_i, candidate_index, threshold, score, left_index_or_None,
              (left_grads, left_hess, right_grads, right_hess)] or [] if no feature can be split
    """
    # Iterate through all unique values of feature column i and calculate the impurity
    values = []
    current_max_score = -1e20  # sentinel below any achievable split score
    split_method = self.split_method_per_level[current_depth]
    # NOTE(review): this assigns self.current_tree_depth but reads
    # self.train_monitor.current_tree_depth - possibly intended to update the
    # train_monitor attribute instead; confirm
    self.current_tree_depth = max(self.train_monitor.current_tree_depth, current_depth)
    valid_features = []
    for i in features_considering:
        if feature_split_constraints[i][0] < min(feature_split_constraints[i][1], len(self.split_candidate_manager.feature_split_candidates[i])): # Ignore features that cannot be split on any further
            valid_features.append(i)
    if len(valid_features) == 0:
        return []
    if split_method == "totally_random":
        # Feature and split candidate are drawn at random (no data access needed)
        new_features = valid_features
        weights = None
        chosen_feature = np.random.choice(new_features, p=weights)
        chosen_split = np.random.choice(range(feature_split_constraints[chosen_feature][0], min(feature_split_constraints[chosen_feature][1], len(self.split_candidate_manager.feature_split_candidates[chosen_feature])))) # Upper split constraint may be len(feature_splits)+1 so needs to be truncated back down
        # This is due to how the hist_based splitting logic works because of the splicing
    # TODO: Track communication (internal splits)
    if split_method != "totally_random":
        # Record how many candidate bins get scored at this depth
        for feature_i in valid_features:
            constraints = feature_split_constraints[feature_i]
            self.train_monitor.bin_tracker[current_depth] += constraints[1] - constraints[0]
    for feature_i in valid_features:
        split_constraints = feature_split_constraints[feature_i]
        if split_method == "partially_random":
            # One random candidate per feature; only the feature choice is score-driven
            chosen_split = random.randint(split_constraints[0], min(split_constraints[1], len(self.split_candidate_manager.feature_split_candidates[feature_i])))
        elif split_method == "totally_random" and feature_i != chosen_feature:
            continue
        if split_method == "hist_based":
            # Prefix sums over the privatised histograms, restricted to the usable candidate range
            cumulative_grads = np.cumsum(self.private_gradient_histogram[feature_i][split_constraints[0]: split_constraints[1]+1])
            cumulative_hess = np.cumsum(self.private_hessian_histogram[feature_i][split_constraints[0]: split_constraints[1]+1])
            total_grads_cu = cumulative_grads[-1]
            total_hess_cu = cumulative_hess[-1]
        for j, threshold in enumerate(self.split_candidate_manager.feature_split_candidates[feature_i]):
            if (split_method == "partially_random" or split_method == "totally_random") and j != chosen_split:
                continue
            if split_constraints[0] <= j <= split_constraints[1] or self.ignore_split_constraints: # Only add split if it isn't one-sided (based on public knowledge of previous splits)
                # Calculate impurity score of proposed split
                if split_method == "hist_based":
                    left_grads_sum = cumulative_grads[j-(split_constraints[0])]
                    left_hess_sum = cumulative_hess[j-(split_constraints[0])]
                    if self.hist_estimator_method == "one-sided":
                        # Right side inferred from the node totals passed in
                        right_grads_sum = total_grads - left_grads_sum
                        right_hess_sum = total_hess - left_hess_sum
                    else:
                        # Right side inferred from the histogram's own totals
                        right_grads_sum = total_grads_cu - left_grads_sum
                        right_hess_sum = total_hess_cu - left_hess_sum
                    if self.hist_estimator_method == "two_sided":
                        # Recompute the node totals/gain from the private histogram itself
                        total_grads = self.private_gradient_histogram[feature_i][split_constraints[0]: split_constraints[1]].sum()
                        total_hess = self.private_hessian_histogram[feature_i][split_constraints[0]: split_constraints[1]].sum()
                        total_gain = self._calculate_gain(total_grads, total_hess)
                    split_score = self._calculate_split_score(self._calculate_gain(left_grads_sum, left_hess_sum), self._calculate_gain(right_grads_sum, right_hess_sum), total_gain)
                elif split_method == "node_based" or split_method == "partially_random":
                    # Compute left sums directly on the node's data, then privatise them
                    new_split_index = self.X[split_index, feature_i] <= threshold
                    left_grads_sum, left_hess_sum = self.privacy_accountant._add_dp_noise(grads[new_split_index].sum(), hess[new_split_index].sum(), current_depth, feature=feature_i, num_obs=len(new_split_index))
                    right_grads_sum = total_grads - left_grads_sum
                    right_hess_sum = total_hess - left_hess_sum
                    split_score = self._calculate_split_score(self._calculate_gain(left_grads_sum, left_hess_sum), self._calculate_gain(right_grads_sum, right_hess_sum), total_gain)
                else: # In the case of totally random no score are computed
                    split_score = float("inf")
                    left_grads_sum, left_hess_sum, right_grads_sum, right_hess_sum = [float("inf")]*4
                if split_score > current_max_score:
                    # Divide X and y depending on if the feature value of X at index feature_i meets the threshold
                    values = [feature_i, j, threshold, split_score, None, (left_grads_sum, left_hess_sum, right_grads_sum, right_hess_sum)]
                    current_max_score = split_score
    return values
def _form_private_gradient_histogram(self, grads, hess, features_considering, split_index, current_depth, adaptive_hessian=False):
    """
    Builds per-feature gradient/hessian histograms over the node's observations and,
    when DP is enabled, privatises them via the privacy accountant.

    :param grads: Per-observation gradients for the node
    :param hess: Per-observation hessians for the node
    :param features_considering: Feature indices to build histograms for
    :param split_index: Index (into self.feature_bin / self.X) of the node's observations
    :param current_depth: Current depth; depth 0 resets the histogram state for a new tree
    :param adaptive_hessian: If True only self.root_hessian_histogram is updated (adaptive sketch)
    """
    self.train_monitor.start_timing_event("server", "initialise private histogram")
    self.train_monitor.start_timing_event("client", "initialise private histogram")
    if current_depth == 0:
        # Fresh histograms at the root of each tree; one bucket per split candidate plus one overflow
        self.gradient_histogram = {}
        self.hessian_histogram = {}
        self.private_gradient_histogram = {i: np.zeros(len(self.split_candidate_manager.feature_split_candidates[i])+1) for i in features_considering}
        self.private_hessian_histogram = {i: np.zeros(len(self.split_candidate_manager.feature_split_candidates[i])+1) for i in features_considering}
    if current_depth == 0 and len(self.trees) == 0:
        # Hessian histogram for the adaptive_hessian sketch; only initialised before the first tree
        self.root_hessian_histogram = {i: np.zeros(len(self.split_candidate_manager.feature_split_candidates[i])+1) for i in features_considering}
    self.train_monitor.end_timing_event("server", "initialise private histogram")
    self.train_monitor.end_timing_event("client", "initialise private histogram")
    for i in features_considering:
        self.train_monitor.start_timing_event("client", "forming gradient + hess histogram")
        num_bins = len(self.split_candidate_manager.feature_split_candidates[i])+1
        digitized = self.feature_bin[i][split_index]  # pre-computed bin index of each observation for feature i
        self.gradient_histogram[i] = np.array(histogram1d(digitized, bins=num_bins, range=[0, num_bins+0.1], weights=grads)) # Fast C histogram implementation
        self.hessian_histogram[i] = np.array(histogram1d(digitized, bins=num_bins, range=[0, num_bins+0.1], weights=hess))
        self.train_monitor.end_timing_event("client", "forming gradient + hess histogram")
        self.train_monitor.start_timing_event("server", "adding noise to gradient + hess histogram")
        if self.dp_method != "":
            if adaptive_hessian:
                # Only the root hessian histogram is privatised; noise calibrated at leaf depth
                _, self.root_hessian_histogram[i] = self.privacy_accountant._add_dp_noise(self.gradient_histogram[i], self.hessian_histogram[i],
                                                                                         depth=self.max_depth-1,
                                                                                         feature=i, histogram_row=True, noise_size=num_bins, adaptive_hessian=True)
            else:
                self.private_gradient_histogram[i], self.private_hessian_histogram[i] = self.privacy_accountant._add_dp_noise(self.gradient_histogram[i], self.hessian_histogram[i],
                                                                                                                             depth=current_depth,
                                                                                                                             feature=i, histogram_row=True, noise_size=num_bins)
        self.train_monitor.end_timing_event("server", "adding noise to gradient + hess histogram")
        # TODO: Communication
        if current_depth == 0 and adaptive_hessian:
            # Records 8*num_bins bytes sent by every participant for the adaptive sketch
            self.train_monitor.update_sent(range(0, self.X.shape[0]), 8*num_bins, increment_round=False)
    # TODO: Communication
    if current_depth == 0 and adaptive_hessian:
        # Count the adaptive sketch exchange as one extra client round
        self.train_monitor.client_rounds_sent[-1] += 1
    if self.dp_method == "":
        # No DP: the "private" histograms are simply the raw ones
        self.private_gradient_histogram, self.private_hessian_histogram = self.gradient_histogram, self.hessian_histogram
def _get_node_id(self, depth, node_num):
return str(depth) + "_" + str(node_num)
def _build_tree(self, features, split_index, node_total_grads, node_total_hess,
                current_depth=0, col_tree_sample=None, col_level_sample=None, row_ids=None, split_constraints=None, previous_node_num=1):
    """
    Main method for recursively building one tree of the ensemble.

    :param features: Array of all feature indices
    :param split_index: Index of the current observations in the node
    :param node_total_grads: Total gradients of the node (privatised where DP applies)
    :param node_total_hess: Total hessians of the node (privatised where DP applies)
    :param current_depth: Current depth of the node (0 = root)
    :param col_tree_sample: Per-tree feature sampling index, or None
    :param col_level_sample: Per-level feature sampling index, or None
    :param row_ids: Unused here; kept for interface compatibility
    :param split_constraints: Per-feature usable split-candidate index bounds
    :param previous_node_num: Position of the node within its level (used for node ids)
    :return: DecisionNode - either an internal split node or a leaf carrying a weight
    """
    self.train_monitor.start_timing_event("server", "sampling features for node")
    features_considering = features
    # Perform column (feature) sampling if needed
    if col_tree_sample is not None:
        features_considering = features_considering[col_tree_sample]
    if col_level_sample is not None:
        features_considering = features_considering[col_level_sample[current_depth]]
    if self.index_sampler.colsample_bynode < 1:
        # Per-node feature subsampling without replacement
        features_considering = features_considering[np.random.choice(range(0, len(features_considering)), size=math.ceil(len(features_considering) * self.index_sampler.colsample_bynode), replace=False)]
    self.train_monitor.node_count += 1
    self.privacy_accountant.current_node = self.train_monitor.node_count
    self.privacy_accountant.current_tree = len(self.trees)
    split_method = self.split_method_per_level[min(current_depth, self.max_depth-1)]
    self.train_monitor.end_timing_event("server", "sampling features for node")
    self.train_monitor.start_timing_event("client", "retrieving grads/hess for node")
    # Obtain raw gradients/hessians for the observations in the current node
    grads, hess = self.train_monitor.gradient_info[-1][0][split_index], self.train_monitor.gradient_info[-1][1][split_index]
    self.train_monitor.end_timing_event("client", "retrieving grads/hess for node")
    if self.num_classes > 2:
        # Multiclass: grads/hess are per-class vectors, so sum over the observation axis
        grads, hess = self.train_monitor.gradient_info[-1][0][split_index], self.train_monitor.gradient_info[-1][1][split_index]
        raw_grads_sum, raw_hess_sum = np.sum(grads, axis=0), np.sum(hess, axis=0)
    else:
        raw_grads_sum, raw_hess_sum = grads.sum(), hess.sum()
    # raw_grads_sum, raw_hess_sum = self.train_monitor.gradient_info[-1][0].sum(where=split_index), self.train_monitor.gradient_info[-1][1].sum(where=split_index)
    if current_depth == 0: # Update private grads at root node
        if split_method == "node_based" or split_method == "partially_random":
            node_total_grads, node_total_hess = self.privacy_accountant._add_dp_noise(raw_grads_sum, raw_hess_sum, -1, num_obs=len(split_index)) # Depth zero
            if self.dp_method != "" and self.track_budget:
                self.privacy_accountant.commit_budget_to_ledger(split_index)
        elif split_method == "totally_random":
            # Totally random splits never look at the data, so no DP totals are needed here
            node_total_hess = float("inf")
            node_total_grads = float("inf")
            if self.split_candidate_manager.sketch_type == "adaptive_hessian" and len(self.trees) < self.split_candidate_manager.sketch_rounds:
                self._form_private_gradient_histogram(grads, hess, features_considering, split_index, current_depth, adaptive_hessian=True) # Adaptive hess test
        else:
            self._form_private_gradient_histogram(grads, hess, features_considering, split_index, current_depth) # Form privatised grads,hess
            if self.dp_method != "" and self.track_budget:
                self.privacy_accountant.commit_budget_to_ledger(split_index)
            # Node totals estimated by averaging each feature's histogram total
            node_total_grads = sum([i.sum() for i in self.private_gradient_histogram.values()])/len(self.private_gradient_histogram.values())
            node_total_hess = sum([i.sum() for i in self.private_hessian_histogram.values()])/len(self.private_hessian_histogram.values())
        self.train_monitor.root_gradient_tracker[0].append(node_total_grads)
        self.train_monitor.root_gradient_tracker[1].append(node_total_hess)
    # If the spliting conditions are satisfied then split the current node otherwise stop and make it a leaf
    if (node_total_hess >= self.min_child_weight or split_method == "totally_random") and (current_depth < self.max_depth):
        if split_method == "hist_based" and current_depth > 0:
            self._form_private_gradient_histogram(grads, hess, features_considering, split_index, current_depth) # Form privatised grads,hess
            if self.hist_estimator_method == "two_sided_averaging" or node_total_grads == float("inf"):
                node_total_grads = sum([i.sum() for i in self.private_gradient_histogram.values()])/len(self.private_gradient_histogram.values())
                node_total_hess = sum([i.sum() for i in self.private_hessian_histogram.values()])/len(self.private_hessian_histogram.values())
        self.train_monitor.start_timing_event("server", "calculating internal split")
        # Calculate current nodes total gain
        node_gain = self._calculate_gain(node_total_grads, node_total_hess)
        # Find best (feature, split) candidates for each feature
        split_data = self._calculate_feature_split(features_considering, split_index, current_depth, node_gain, node_total_grads, node_total_hess, grads, hess, split_constraints)
        # Commit budget spent by participants for computing split scores
        if self.dp_method != "" and self.track_budget:
            self.privacy_accountant.commit_budget_to_ledger(split_index)
        self.train_monitor.end_timing_event("server", "calculating internal split")
        if split_data:
            chosen_feature, bucket_index, chosen_threshold, largest_score, left_split_index, split_gradient_info = split_data
            if left_split_index is None:
                left_split_index = self.X[split_index, chosen_feature] <= chosen_threshold
            left_grads_sum, left_hess_sum, right_grads_sum, right_hess_sum = split_gradient_info # Gradient information to pass to child nodes
            right_split_index = split_index[~left_split_index]
            left_split_index = split_index[left_split_index]
            if largest_score > self.reg_gamma:
                self.train_monitor.start_timing_event("server", "updating split constraints")
                # Update feature split constraints with valid feature split candidate index bounds - this stops the algo from picking one-sided splits later on
                left_split_constraints = copy(split_constraints)
                left_split_constraints[chosen_feature] = [left_split_constraints[chosen_feature][0], bucket_index-1]
                right_split_constraints = copy(split_constraints)
                right_split_constraints[chosen_feature] = [bucket_index+1, right_split_constraints[chosen_feature][1]]
                self.train_monitor.end_timing_event("server", "updating split constraints")
                # Build subtrees recursively for the right and left branches
                self.train_monitor.last_feature = chosen_feature
                left_num = 2*(previous_node_num)-1
                right_num = 2*(previous_node_num)
                left_branch = self._build_tree(features, left_split_index, left_grads_sum, left_hess_sum,
                                               current_depth + 1, col_tree_sample, col_level_sample, split_constraints=left_split_constraints, previous_node_num=left_num)
                right_branch = self._build_tree(features, right_split_index, right_grads_sum, right_hess_sum,
                                                current_depth + 1, col_tree_sample, col_level_sample, split_constraints=right_split_constraints, previous_node_num=right_num)
                self.train_monitor.internal_node_count[current_depth] += 1
                return DecisionNode(node_id=str(current_depth) + "_" + str(previous_node_num), feature_i=chosen_feature, threshold=chosen_threshold, true_branch=left_branch, false_branch=right_branch, split_gain=largest_score, gradient_sum=node_total_grads, hessian_sum=node_total_hess, num_observations=len(split_index), depth=current_depth)
    self.train_monitor.start_timing_event("server", "leaf weight")
    # We're at leaf => determine weight
    if split_method == "totally_random":
        if self.dp_method != "" and self.dp_method != "gaussian_ldp":
            # Leaf totals privatised here with Gaussian noise calibrated at leaf depth
            size = self.num_classes if self.num_classes > 2 else None
            raw_grads_sum = raw_grads_sum.sum()
            raw_hess_sum = raw_hess_sum.sum()
            node_total_grads = raw_grads_sum + np.random.normal(0, self.privacy_accountant.gaussian_var(gradient_type="gradient", depth=self.max_depth-1), size=size)
            node_total_hess = raw_hess_sum + np.random.normal(0, self.privacy_accountant.gaussian_var(gradient_type="hessian", depth=self.max_depth-1), size=size)
            if self.track_budget:
                self.privacy_accountant.commit_budget_to_ledger(split_index)
        else:
            node_total_grads, node_total_hess = raw_grads_sum, raw_hess_sum
    # Calculate leaf weight based on DP grads/hess
    leaf_weight = self._calculate_model_update(node_total_grads, node_total_hess, None, None) # pass grads/hess?
    # TODO: Track comm (weight)
    if self.training_method != "batched_boosting":
        # self.train_monitor.update_sent(split_index, 8*2, increment_round=False)
        pass
    self.train_monitor.end_timing_event("server", "leaf weight")
    # Update training information...
    self.train_monitor.gradient_total[0] += node_total_grads
    self.train_monitor.gradient_total[1] += node_total_hess
    self.train_monitor.leaf_count += 1
    self.train_monitor.current_tree_weights[split_index] += leaf_weight
    # split_gain = self._calculate_gain(node_total_grads, node_total_hess)
    if self.num_classes == 2:
        leaf_weight = np.array([leaf_weight])
    return DecisionNode(node_id=str(current_depth) + "_" + str(previous_node_num), value=leaf_weight, num_observations=len(split_index), gradient_sum=node_total_grads,
                        hessian_sum=node_total_hess,
                        split_gain=0,
                        feature_i=self.train_monitor.last_feature)
def _calculate_model_update(self,node_total_grads, node_total_hess, grads=None, hess=None):
    """
    Computes a leaf weight from the node's gradient/hessian totals.

    :param node_total_grads: Total gradients of the leaf (DP-noised upstream where applicable)
    :param node_total_hess: Total hessians of the leaf
    :param grads: Raw per-observation gradients (only used by signed/per_sample updates)
    :param hess: Raw per-observation hessians (only used by signed/per_sample updates)
    :return: Leaf weight clipped to [-reg_delta, reg_delta] and scaled by reg_eta
    """
    if self.training_method == "rf": # RF update
        # Ratio clipped to [0, 1], with 0.5 as a fallback when hess is non-positive -
        # presumably a class-probability estimate; confirm against the RF prediction path
        if node_total_hess <= 0:
            leaf_weight = 0.5
        elif node_total_grads <= 0:
            leaf_weight = 0
        else:
            leaf_weight = node_total_grads/node_total_hess
            if leaf_weight > 1:
                leaf_weight = 1
    else: # Grad or Newton update
        if self.num_classes > 2:
            leaf_weight = self._calculate_leaf_weight(node_total_grads, node_total_hess)
        else:
            if node_total_hess <= self.privacy_accountant.min_hess:
                leaf_weight = 0
            else:
                # Clip totals to their globally feasible range before forming the weight
                node_total_grads = np.clip(node_total_grads, self.privacy_accountant.min_gradient*self.X.shape[0], self.privacy_accountant.max_gradient*self.X.shape[0])
                node_total_hess = np.clip(node_total_hess, self.privacy_accountant.min_hess*self.X.shape[0], self.privacy_accountant.max_hess*self.X.shape[0])
                leaf_weight = self._calculate_leaf_weight(node_total_grads, node_total_hess)
        # Signed or individual updates...
        if "signed" in self.weight_update_method or "per_sample" in self.weight_update_method:
            leaf_weight = (-grads/(hess+self.reg_lambda)) if "newton" in self.weight_update_method else -grads
            # leaf_weight = np.sign(leaf_weight) # local sign - doesnt work...
            leaf_weight = leaf_weight.sum()
            if "signed" in self.weight_update_method:
                # Collapse the summed update to a +/- reg_delta step
                if leaf_weight < 0:
                    leaf_weight = -self.reg_delta
                elif leaf_weight > 0:
                    leaf_weight = self.reg_delta
                else:
                    leaf_weight = 0
    # NOTE(review): clip + learning-rate scaling reconstructed at function level, i.e.
    # applying to every update type including RF - confirm against original indentation
    leaf_weight = np.clip(leaf_weight, -self.reg_delta, self.reg_delta)
    leaf_weight *= self.reg_eta
    # print(leaf_weight)
    return leaf_weight
# Feature Importance
# --------------------------------------------------------------------------------------
# See https://xgboost.readthedocs.io/en/latest/R-package/discoverYourData.html#feature-importance for more details on how to compute feature importance
def _traverse_tree(self, tree, feature_importance_map, feature_count_map, threshold_values, depth_map, method):
"""
Recursively traverses a tree and calculates various statistics needed for feature importance
:param tree: TreeBase object
:param feature_importance_map: map of tree ids to feature importance values
:param feature_count_map: map of feature values to counts of the time it's appeared in a split
:param threshold_values: Split values chosen
:param method: Feature importance method
:return:
"""
if tree is None:
return
if "gain" in method and tree.split_gain is not None and tree.split_gain != float("inf") and tree.feature_i != -1:
feature_importance_map[tree.feature_i] += tree.split_gain
threshold_values[tree.feature_i].append(tree.threshold)
feature_count_map[tree.feature_i] += 1
depth_map[tree.depth] += tree.split_gain
elif "cover" in method and tree.hessian_sum is not None and tree.hessian_sum != float("inf") and tree.feature_i != -1:
feature_importance_map[tree.feature_i] += tree.hessian_sum
threshold_values[tree.feature_i].append(tree.threshold)
feature_count_map[tree.feature_i] += 1
depth_map[tree.depth] += tree.hessian_sum
self._traverse_tree(tree.true_branch, feature_importance_map, feature_count_map, threshold_values, depth_map, method)
self._traverse_tree(tree.false_branch, feature_importance_map, feature_count_map, threshold_values, depth_map, method)
def feature_importance(self, method="gain", return_all=False):
    """
    Calculates feature importance from the trained model.

    :param method: Feature importance method from "gain", "cover", "average_gain", "average_cover"
    :param return_all: If True, also return split counts, chosen thresholds and per-depth totals
    :return: Map of features to their importance, or a 4-tuple (importance, counts,
             thresholds, depth totals) when return_all is True
    :raises ValueError: If the model has not been trained yet
    """
    if len(self.trees) == 0:
        # Bug fix: the exception was previously constructed but never raised
        raise ValueError("Cannot calculate feature importance from an untrained model. Use .fit(X,y) first")
    feature_importance_map = Counter({k: 0 for k in self.feature_list})
    feature_count_map = Counter({k: 0 for k in self.feature_list})
    threshold_values = {k: [] for k in self.feature_list}
    # Bug fix: accumulate over the whole ensemble - previously depth_map was
    # re-created per tree, so return_all only reflected the final tree
    depth_map = defaultdict(int)
    for tree in self.trees:
        self._traverse_tree(tree, feature_importance_map, feature_count_map, threshold_values, depth_map, method)
    if "average" in method: # average, by default total gain/cover will be used
        for k in feature_importance_map.keys():
            # Guard against features that were never chosen for a split (avoids ZeroDivisionError)
            feature_importance_map[k] = feature_importance_map[k] / feature_count_map[k] if feature_count_map[k] else 0
    if return_all:
        return feature_importance_map, feature_count_map, threshold_values, depth_map
    return feature_importance_map
# --- extraction residue removed; next file: experiments/paper_experiments/paper_experiments.py ---
import math
import numpy as np
from experiments.experiment_helpers.data_loader import DataLoader
from experiments.experiment_helpers.experiment_runner import ExperimentRunner
from dev.communication_framework import CommunicationsFramework
# Seeds shared across experiments so repeated runs are reproducible
global_seeds = [1, 4, 100, 333, 1002]
# Module-level loader used by the experiment functions that do not take seeds
data_loader = DataLoader([1, 4, 100, 333, 1002])
# data_path handed to ExperimentRunner for replication runs
replication_path = "../replication_experiments/replication_data/"
# =================== Paper Experiments ===================
# =================== E1 + E2 - Split Methods + Weight Updates ===================
# Exp 1 - Split methods + Weight updates
# Corresponds to Figure 1 (a,b,c), Table 2 in main text, Figures 7-10 and Table 7-10 in the Appendix
def dp_split_methods_with_update_methods(save_data=False, filename="dp_split_methods_with_update1", replication=False, iters=3, datasets=None, seeds=None):
    """
    E1 + E2: DP split methods combined with weight-update methods.
    Corresponds to Figure 1 (a,b,c), Table 2 in the main text, Figures 7-10 and
    Tables 7-10 in the Appendix.

    :param save_data: Whether ExperimentRunner should persist results
    :param filename: Output filename for saved results
    :param replication: If True, run the replication configuration using iters/datasets/seeds
    :param iters: Number of repetitions per configuration (replication runs only)
    :param datasets: Dataset names to load (replication runs only)
    :param seeds: Random seeds for the DataLoader (replication runs only)
    """
    if not replication:
        # Full paper configuration: fixed seeds/datasets and all three weight updates
        data_loader = DataLoader(global_seeds)
        datasets = data_loader.load_datasets(["Credit 1", "Credit 2", "adult", "Bank", "nomao"], remove_missing=True, return_dict=True, verbose=True).items()
        iters = 3
        update_methods = ["rf", "xgboost", "gbm"]
    else:
        data_loader = DataLoader(seeds)
        datasets = data_loader.load_datasets(datasets, remove_missing=True, return_dict=True, verbose=True).items()
        update_methods = ["xgboost"]
    # Shared sweep grid (previously duplicated verbatim in both branches)
    num_trees = [5, 10, 25, 50, 100, 150, 200, 250, 300]
    hist_sizes = [32]
    depths = [2, 3, 4, 5, 6]
    epsilons = [0.1, 0.25, 0.5, 0.75, 1]
    arg_options = [{"dp_method": "gaussian_cdp", "split_method": "totally_random", "gradient_budgets": "vector_mechanism"},
                   {"dp_method": "gaussian_cdp", "split_method": "partially_random", "gradient_budgets": "vector_mechanism"},
                   {"dp_method": "gaussian_cdp", "split_method": "hist_based", "gradient_budgets": "vector_mechanism"}]
    new_arg_options = []
    for d in depths:
        for T in num_trees:
            for update_method in update_methods:
                for args in arg_options:
                    new_args = args.copy()
                    new_args["max_depth"] = d
                    new_args["num_trees"] = T
                    new_args["track_budget"] = False
                    new_args["verbose"] = False
                    if update_method == "rf":
                        new_args["training_method"] = "rf"
                    else:
                        new_args["weight_update_method"] = update_method
                    new_arg_options.append(new_args)
    # Only the runner construction differs between replication and paper runs;
    # the run call itself was duplicated in the original and is now shared
    if replication:
        exp = ExperimentRunner(performance=True, data_path=replication_path)
    else:
        exp = ExperimentRunner(performance=True)
    exp.run(datasets, iters, new_arg_options, hist_sizes, save_data=save_data, filename=filename, iterated_param=("epsilon", epsilons))
# Exp 2 - Non-DP Split methods + Weight updates
# Not used in the paper
def non_dp_split_methods_with_update_methods():
    """Non-DP counterpart of the split-method/weight-update sweep (not used in the paper)."""
    exp = ExperimentRunner()
    datasets = data_loader.load_datasets(["Credit 1", "Credit 2", "adult", "Bank", "nomao"], remove_missing=True, return_dict=True, verbose=True).items()
    iters = 3
    num_trees = [5, 10, 25, 50, 100, 150, 200, 250, 300]
    hist_sizes = [32]
    depths = [2, 3, 4, 5, 6]
    epsilons = [0]  # single dummy value - dp_method is empty in every configuration
    update_methods = ["rf", "xgboost", "gbm"]
    base_options = [{"dp_method": "", "split_method": "totally_random"},
                    {"dp_method": "", "split_method": "partially_random"},
                    {"dp_method": "", "split_method": "hist_based"}]
    configs = []
    for depth in depths:
        for trees in num_trees:
            for update in update_methods:
                for base in base_options:
                    cfg = dict(base)
                    cfg["max_depth"] = depth
                    cfg["num_trees"] = trees
                    cfg["track_budget"] = False
                    cfg["verbose"] = False
                    # "rf" switches the training method; boosting variants set the weight update
                    if update == "rf":
                        cfg["training_method"] = "rf"
                    else:
                        cfg["weight_update_method"] = update
                    configs.append(cfg)
    exp.run(datasets, iters, configs, hist_sizes, save_data=False, filename="non_dp_split_methods_with_update1", iterated_param=("epsilon", epsilons))
# Exp 3 - Gradient Budget Allocation
# Not used in the paper
def gradient_budget_alloc():
    """Gradient budget allocation sweep: vector mechanism vs fixed 90/10 split (not used in the paper)."""
    exp = ExperimentRunner()
    datasets = data_loader.load_datasets(["Credit 1", "Credit 2", "adult", "Bank", "nomao"], remove_missing=True, return_dict=True, verbose=True).items()
    iters = 3
    num_trees = [100, 200, 300]
    hist_sizes = [32]
    depths = [2, 4, 6]
    epsilons = [0.1, 0.25, 0.5, 0.75, 1]
    update_methods = ["rf", "xgboost", "gbm"]
    base_options = [{"dp_method": "gaussian_cdp", "split_method": "hist_based", "gradient_budgets": "vector_mechanism"},
                    {"dp_method": "gaussian_cdp", "split_method": "hist_based", "gradient_budgets": [0.9, 0.1]},
                    {"dp_method": "gaussian_cdp", "split_method": "totally_random", "gradient_budgets": "vector_mechanism"},
                    {"dp_method": "gaussian_cdp", "split_method": "totally_random", "gradient_budgets": [0.9, 0.1]}]
    configs = []
    for depth in depths:
        for total_trees in num_trees:
            for update in update_methods:
                for base in base_options:
                    cfg = dict(base)
                    cfg["max_depth"] = depth
                    # hist_based configurations run with 10x fewer trees (as in the original sweep)
                    cfg["num_trees"] = total_trees // 10 if cfg["split_method"] == "hist_based" else total_trees
                    cfg["track_budget"] = False
                    cfg["verbose"] = False
                    if update == "rf":
                        cfg["training_method"] = "rf"
                    else:
                        cfg["weight_update_method"] = update
                    configs.append(cfg)
    exp.run(datasets, iters, configs, hist_sizes, save_data=False, filename="gradient_budget_alloc1", iterated_param=("epsilon", epsilons))
# =================== E3 - Split Candidate Methods ===================
# Exp 4 - Split candidate methods
# Corresponds to Figure 2 (a,b,c) and Table 3 in the main text, Figure 11 and 12 in the Appendix
def dp_split_candidate_methods(save_data=False, filename="split_candidate_methods1", replication=False, iters=3, datasets=None, seeds=None):
    """
    E3: split candidate (sketch) methods under DP.
    Corresponds to Figure 2 (a,b,c) and Table 3 in the main text, Figures 11 and 12
    in the Appendix.

    :param save_data: Whether ExperimentRunner should persist results
    :param filename: Output filename for saved results
    :param replication: If True, use the caller-supplied iters/datasets/seeds and the replication data path
    :param iters: Number of repetitions per configuration (replication runs only)
    :param datasets: Dataset names to load (replication runs only)
    :param seeds: Random seeds for the DataLoader (replication runs only)
    """
    if not replication:
        data_loader = DataLoader(global_seeds)
        datasets = data_loader.load_datasets(["Credit 1", "Credit 2", "adult", "nomao", "Bank", "higgs_sampled"], remove_missing=True, return_dict=True, verbose=True).items()
        iters = 3
    else:
        data_loader = DataLoader(seeds)
        datasets = data_loader.load_datasets(datasets, remove_missing=True, return_dict=True, verbose=True).items()
    # Shared sweep grid (previously duplicated verbatim in both branches, risking drift)
    num_trees = [100, 200, 300]
    depths = [4]
    hist_sizes = [2, 4, 8, 16, 32, 64, 128]
    epsilons = [0.1, 0.25, 0.5, 0.75, 1]
    sketch_rounds = [5, 10, 20, 30, float("inf")]
    hist_methods = ["uniform", "log", "exact_quantiles", "feverless", "adaptive_hessian"]
    arg_options = [{"dp_method": "gaussian_cdp", "split_method": "totally_random"}]
    new_arg_options = []
    for d in depths:
        for T in num_trees:
            for h in hist_methods:
                for args in arg_options:
                    new_args = args.copy()
                    new_args["max_depth"] = d
                    # hist_based configurations (without feature interactions) use 10x fewer trees
                    if new_args["split_method"] == "hist_based" and new_args.get("feature_interaction_method") is None:
                        new_args["num_trees"] = T//10
                    else:
                        new_args["num_trees"] = T
                    new_args["verbose"] = False
                    new_args["track_budget"] = False
                    new_args["sketch_type"] = h
                    # The adaptive_hessian sketch is additionally swept over its number of sketch rounds
                    if (new_args["split_method"] == "hist_based" or new_args["split_method"] == "totally_random") and new_args.get("feature_interaction_method") is None and h == "adaptive_hessian":
                        for s in sketch_rounds:
                            new_new_args = new_args.copy()
                            new_new_args["sketch_rounds"] = s
                            new_arg_options.append(new_new_args)
                    else:
                        new_arg_options.append(new_args)
    # NOTE: performance tracking is enabled only for replication runs (preserved from the original)
    if replication:
        exp = ExperimentRunner(performance=True, data_path=replication_path)
    else:
        exp = ExperimentRunner(performance=False)
    exp.run(datasets, iters, new_arg_options, hist_sizes, save_data=save_data, filename=filename, iterated_param=("epsilon", epsilons))
# Not used in the paper
def non_dp_split_candidate_methods():
    """Split-candidate sweep with a single epsilon=0 setting (not used in the paper).

    NOTE(review): despite the name, the configurations still set
    dp_method="gaussian_cdp" with epsilon 0 - confirm that epsilon 0 is treated
    as "no DP" by the runner.
    """
    exp = ExperimentRunner()
    datasets = data_loader.load_datasets(["Credit 1", "Credit 2", "adult", "nomao", "Bank", "higgs_sampled"], remove_missing=True, return_dict=True, verbose=True).items()
    iters = 1
    num_trees = [100, 200, 300]
    depths = [3, 4]
    hist_sizes = [2, 4, 8, 16, 32, 64, 128]
    epsilons = [0]
    sketch_rounds = [5, 10, 20, 30, float("inf")]
    hist_methods = ["uniform", "log", "exact_quantiles", "feverless", "adaptive_hessian"]
    base_options = [{"dp_method": "gaussian_cdp", "split_method": "totally_random"}]
    configs = []
    for depth in depths:
        for total_trees in num_trees:
            for sketch in hist_methods:
                for base in base_options:
                    cfg = dict(base)
                    cfg["max_depth"] = depth
                    # hist_based configurations (without feature interactions) use 10x fewer trees
                    if cfg["split_method"] == "hist_based" and cfg.get("feature_interaction_method") is None:
                        cfg["num_trees"] = total_trees // 10
                    else:
                        cfg["num_trees"] = total_trees
                    cfg["track_budget"] = False
                    cfg["sketch_type"] = sketch
                    # The adaptive_hessian sketch is additionally swept over its number of sketch rounds
                    if (cfg["split_method"] == "hist_based" or cfg["split_method"] == "totally_random") and cfg.get("feature_interaction_method") is None and sketch == "adaptive_hessian":
                        for rounds in sketch_rounds:
                            variant = dict(cfg)
                            variant["sketch_rounds"] = rounds
                            configs.append(variant)
                    else:
                        configs.append(cfg)
    exp.run(datasets, iters, configs, hist_sizes, save_data=False, filename="non_dp_split_candidate_methods1", iterated_param=("epsilon", epsilons))
# =================== E4 - Feature Interactions ===================
# Exp 5 - k-way methods
# Corresponds to Figure 3
def feature_interaction_experiments(save_data=False, filename="k_way", replication=False, iters=3, datasets=None, seeds=None):
    """Sweep k-way feature-interaction methods (Figure 3).

    Args:
        save_data: Persist results through ExperimentRunner when True.
        filename: Output file stem for saved results.
        replication: When True, load the caller-supplied ``datasets`` with
            ``seeds`` and use the replication data path; otherwise run the
            paper's default grid.
        iters: Repetitions per configuration (honoured in replication mode
            only; the default grid always uses 3).
        datasets: Dataset names to load (replication mode only).
        seeds: DataLoader seeds (replication mode only).
    """
    if not replication:
        data_loader = DataLoader(global_seeds)
        datasets = data_loader.load_datasets(["Credit 1", "Credit 2", "adult", "Bank", "nomao"], remove_missing=True, return_dict=True, verbose=True).items()
        iters = 3
        depths = [2,3,4]
    else:
        data_loader = DataLoader(seeds)
        datasets = data_loader.load_datasets(datasets, remove_missing=True, return_dict=True, verbose=True).items()
        depths = [4]
    # Shared grid settings (identical in both modes).
    num_trees = [10, 25, 50, 75, 100, 200, 300]
    hist_sizes = [32]
    feature_k = [1,2,3,4,5, None]
    feature_interaction_methods = ["cyclical", "random"]
    arg_options = [# Standard TR-XGBoost method with vec mech
                   {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "gradient_budgets": "vector_mechanism",
                    "sketch_type": "uniform", "track_budget": False, "weight_update_method": "xgboost"},
                   # TR-XGBoost with GBM update
                   {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "gradient_budgets": [0.9, 0.1],
                    "sketch_type": "uniform", "track_budget": False, "weight_update_method": "gbm"},
                   {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "gradient_budgets": "vector_mechanism",
                    "sketch_type": "uniform", "track_budget": False, "weight_update_method": "gbm"},]
    # Only epsilon=1 is swept here; num_trees is the iterated parameter below.
    epsilons = [1]
    new_arg_options = []
    for d in depths:
        for eps in epsilons:
            for args in arg_options:
                new_args = args.copy()
                new_args["max_depth"] = d
                new_args["epsilon"] = eps
                new_args["track_budget"] = False
                new_args["verbose"] = False
                # Expand each base config over all interaction method/k pairs.
                for method in feature_interaction_methods:
                    for k in feature_k:
                        new_new_args = new_args.copy()
                        new_new_args["feature_interaction_method"] = method
                        new_new_args["feature_interaction_k"] = k
                        new_arg_options.append(new_new_args)
    if replication:
        exp = ExperimentRunner(performance=True, data_path=replication_path)
    else:
        exp = ExperimentRunner(performance=False)
    exp.run(datasets, iters, new_arg_options, hist_sizes, save_data=save_data, filename=filename, iterated_param=("num_trees", num_trees))
# Exp 4 - EBM comparisons
# Corresponds to Figure 4
def dp_ebm_experiment(save_data=False, filename="dp_ebm_experiment", replication=False, iters=3, datasets=None, seeds=None):
    """Compare TR-XGBoost/GBM variants against DP-EBM-style updates (Figure 4).

    Args:
        save_data: Persist results through ExperimentRunner when True.
        filename: Output file stem for saved results.
        replication: When True, load the caller-supplied ``datasets`` with
            ``seeds`` and use the replication data path.
        iters: Repetitions per configuration (replication mode only; the
            default grid always uses 3).
        datasets: Dataset names to load (replication mode only).
        seeds: DataLoader seeds (replication mode only).
    """
    if not replication:
        data_loader = DataLoader(global_seeds)
        datasets = data_loader.load_datasets(["Credit 1", "Credit 2", "adult", "Bank", "nomao"], remove_missing=True, return_dict=True, verbose=True).items()
        iters = 3
        depths = [2,3,4]
    else:
        data_loader = DataLoader(seeds)
        datasets = data_loader.load_datasets(datasets, remove_missing=True, return_dict=True, verbose=True).items()
        depths = [4]
    # Shared grid settings (identical in both modes).
    num_trees = [10, 25, 50, 75, 100, 200, 300]
    hist_sizes = [32]
    arg_options = [
        # Standard TR-XGBoost method with vec mech
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "gradient_budgets": "vector_mechanism",
         "sketch_type": "uniform", "track_budget": False, "weight_update_method": "xgboost"},
        # TR-XGBoost with GBM update
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "gradient_budgets": [0.9, 0.1],
         "sketch_type": "uniform", "track_budget": False, "weight_update_method": "gbm"},
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "gradient_budgets": "vector_mechanism",
         "sketch_type": "uniform", "track_budget": False, "weight_update_method": "gbm"},
        # EBM and EBM XGBoost
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "gradient_budgets": [0.9, 0.1],
         "sketch_type": "uniform", "track_budget": False, "weight_update_method": "gbm", "feature_interaction_method": "cyclical", "feature_interaction_k": 1},
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "gradient_budgets": "vector_mechanism",
         "sketch_type": "uniform", "track_budget": False, "weight_update_method": "xgboost", "feature_interaction_method": "cyclical", "feature_interaction_k": 1},
        # Hist based w/ vec mech - xgboost update and gbm update
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "hist_based", "gradient_budgets": "vector_mechanism",
         "sketch_type": "uniform", "track_budget": False, "weight_update_method": "xgboost", "feature_interaction_method": "cyclical", "feature_interaction_k": 1},
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "hist_based", "gradient_budgets": "vector_mechanism",
         "sketch_type": "uniform", "track_budget": False, "weight_update_method": "gbm", "feature_interaction_method": "cyclical", "feature_interaction_k": 1},]
    # Only epsilon=1 is swept here; num_trees is the iterated parameter below.
    epsilons = [1]
    new_arg_options = []
    for d in depths:
        for eps in epsilons:
            for args in arg_options:
                new_args = args.copy()
                new_args["max_depth"] = d
                new_args["epsilon"] = eps
                new_args["track_budget"] = False
                new_args["verbose"] = False
                new_arg_options.append(new_args)
    if replication:
        exp = ExperimentRunner(performance=True, data_path=replication_path)
    else:
        exp = ExperimentRunner(performance=False)
    exp.run(datasets, iters, new_arg_options, hist_sizes, save_data=save_data, filename=filename, iterated_param=("num_trees", num_trees))
# Not used in the paper
def non_dp_ebm_experiment():
    """Non-private counterpart of the DP-EBM comparison (not used in the paper).

    NOTE(review): relies on a module-level ``data_loader``; confirm it is
    initialised before calling.
    """
    exp = ExperimentRunner()
    datasets = data_loader.load_datasets(["Credit 1", "Credit 2", "adult", "nomao", "Bank"], remove_missing=True, return_dict=True, verbose=True).items()
    iters = 3
    num_trees = [10, 25, 50, 75, 100, 200, 300]
    depths = [2,3,4]
    hist_sizes = [32]
    non_dp_arg_options = [{"dp_method": "", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
                           "sketch_type": "uniform", "track_budget": False, "weight_update_method": "xgboost",},
                          {"dp_method": "", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
                           "sketch_type": "uniform", "track_budget": False, "weight_update_method": "gbm",},
                          {"dp_method": "", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
                           "sketch_type": "uniform", "track_budget": False, "weight_update_method": "gbm", "feature_interaction_method": "cyclical", "feature_interaction_k": 1},
                          {"dp_method": "", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
                           "sketch_type": "uniform", "track_budget": False, "weight_update_method": "xgboost", "feature_interaction_method": "cyclical", "feature_interaction_k": 1},]
    arg_options = non_dp_arg_options
    # Epsilon is irrelevant for the non-private runs; a single placeholder
    # value keeps the epsilon loop to one pass.
    epsilons = [1]
    new_arg_options = []
    for d in depths:
        for eps in epsilons:
            for args in arg_options:
                new_args = args.copy()
                new_args["max_depth"] = d
                new_args["epsilon"] = eps
                new_arg_options.append(new_args)
    exp.run(datasets, iters, new_arg_options, hist_sizes, save_data=False, filename="non_dp_ebm_experiment", iterated_param=("epsilon", epsilons))
# =================== E5 - Batched Updates ===================
# Exp 6 - Batched Updates
# Corresponds to Figure 5, Table 4 in the main text, Figure 13 in the Appendix
def batched_boosting(save_data=False, filename="rf_boosting1", replication=False, iters=3, datasets=None, seeds=None):
    """Compare boosting, RF and batched-boosting training (Figure 5, Table 4,
    Appendix Figure 13).

    Args:
        save_data: Persist results through ExperimentRunner when True.
        filename: Output file stem for saved results.
        replication: When True, load the caller-supplied ``datasets`` with
            ``seeds`` and use the replication data path.
        iters: Repetitions per configuration (replication mode only; the
            default grid always uses 3).
        datasets: Dataset names to load (replication mode only).
        seeds: DataLoader seeds (replication mode only).
    """
    if not replication:
        data_loader = DataLoader(global_seeds)
        datasets = data_loader.load_datasets(["Credit 1", "Credit 2", "adult", "Bank", "nomao"], remove_missing=True, return_dict=True, verbose=True).items()
        iters = 3
        depths = [2, 4]
    else:
        data_loader = DataLoader(seeds)
        datasets = data_loader.load_datasets(datasets, remove_missing=True, return_dict=True, verbose=True).items()
        depths = [4]
    # Shared grid settings (identical in both modes).
    num_trees = [100, 200, 300]
    hist_sizes = [32]
    epsilons = [0.05, 0.1, 0.2, 0.3, 0.4, 0.5]
    # Batch size as a fraction of the total ensemble size.
    rf_sizes = [0.05, 0.1, 0.25, 0.34, 0.5, 0.75, 1]
    arg_options = [
        # EBM TR-GBM and EBM TR-XGBoost
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "sketch_type": "uniform", "track_budget": False, "weight_update_method": "gbm", "feature_interaction_method": "cyclical", "feature_interaction_k": 1,
         "training_method": "boosting"},
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "sketch_type": "uniform", "track_budget": False, "weight_update_method": "xgboost", "feature_interaction_method": "cyclical", "feature_interaction_k": 1,
         "training_method": "boosting"},
        # DP-TR-XGBoost + GBM without EBM
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "sketch_type": "uniform", "track_budget": False, "weight_update_method": "xgboost",
         "training_method": "boosting"},
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "sketch_type": "uniform", "track_budget": False, "weight_update_method": "gbm",
         "training_method": "boosting"},
        # Normal and EBM RF
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "sketch_type": "uniform", "track_budget": False, "verbose": False, "training_method": "rf"},
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "sketch_type": "uniform", "track_budget": False, "verbose": False, "training_method": "rf" , "feature_interaction_method": "cyclical", "feature_interaction_k": 1,},
        # RF boosting
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "sketch_type": "uniform", "track_budget": False, "verbose": False, "training_method": "batched_boosting", "batched_update_size": 30},
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "sketch_type": "uniform", "track_budget": False, "verbose": False, "training_method": "batched_boosting", "batched_update_size": 30, "feature_interaction_method": "cyclical", "feature_interaction_k": 1},
    ]
    new_arg_options = []
    for d in depths:
        for T in num_trees:
            for args in arg_options:
                new_args = args.copy()
                new_args["max_depth"] = d
                new_args["num_trees"] = T
                new_args["verbose"] = False
                new_args["track_budget"] = False
                # Gradient budgets are forced to the vector mechanism for
                # every configuration in this experiment.
                new_args["gradient_budgets"] = "vector_mechanism"
                if new_args.get("training_method") == "batched_boosting":
                    # Expand batched boosting over all batch-size fractions.
                    for rf_size in rf_sizes:
                        copy_args = new_args.copy()
                        copy_args["batched_update_size"] = math.ceil(rf_size*copy_args["num_trees"])
                        new_arg_options.append(copy_args)
                else:
                    new_arg_options.append(new_args)
    if replication:
        exp = ExperimentRunner(performance=True, data_path=replication_path)
    else:
        exp = ExperimentRunner(performance=False)
    exp.run(datasets, iters, new_arg_options, hist_sizes, save_data=save_data, filename=filename, iterated_param=("epsilon", epsilons))
# =================== E6 - Comparisons ===================
# Exp - Comparisons
# Non-private XGBoost line is used in Figure 6 and Figures 14-18
def non_dp_comparisons_experiment():
    """Run the non-private baseline methods used by the comparison plots.

    NOTE(review): relies on a module-level ``data_loader``; confirm it is
    initialised before calling.
    """
    runner = ExperimentRunner()
    loaded = data_loader.load_datasets(["Credit 1", "Credit 2", "adult", "nomao", "Bank"], remove_missing=True, return_dict=True, verbose=True).items()
    repetitions = 3
    tree_counts = [25, 50, 75, 100, 125, 150, 175, 200, 225, 250, 275, 300]
    max_depths = [2, 3, 4]
    bin_counts = [32]
    eps_grid = [0]
    batch_fractions = [0.25, 1]
    base_configs = [
        # FEVERLESS (uniform)
        {"dp_method": "", "split_method": "hist_based", "weight_update_method": "xgboost",
         "sketch_type": "uniform"},
        # FEVERLESS (quantiles)
        {"dp_method": "", "split_method": "hist_based", "weight_update_method": "xgboost",
         "sketch_type": "exact_quantiles"},
        # DP-RF
        {"dp_method": "", "split_method": "totally_random", "training_method": "rf"},
        # DP-EBM (full cycle, GBM)
        {"dp_method": "", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "weight_update_method": "gbm",
         "full_ebm": True, "feature_interaction_method": "cyclical", "feature_interaction_k": 1},
        # DP-EBM XGBoost (no full cycle)
        {"dp_method": "", "split_method": "totally_random", "weight_update_method": "xgboost",
         "feature_interaction_method": "cyclical", "feature_interaction_k": 1},
        # DP-TR XGBoost
        {"dp_method": "", "split_method": "totally_random", "weight_update_method": "xgboost",},
    ]
    expanded = []
    for depth in max_depths:
        for n_trees in tree_counts:
            for base in base_configs:
                cfg = dict(base)
                cfg["max_depth"] = depth
                cfg["num_trees"] = n_trees
                cfg["track_budget"] = False
                cfg["verbose"] = False
                if cfg.get("training_method") != "batched_boosting":
                    expanded.append(cfg)
                    continue
                # Batched boosting expands over the batch-size fractions.
                for frac in batch_fractions:
                    variant = dict(cfg)
                    variant["batched_update_size"] = math.ceil(frac * variant["num_trees"])
                    expanded.append(variant)
    runner.run(loaded, repetitions, expanded, bin_counts, save_data=False, filename="non_dp_comparisons", iterated_param=("epsilon", eps_grid))
# Corresponds to Figure 6 in the main text and Figures 14-18 in the Appendix
def comparisons_experiment(save_data=False, filename="comparisons1", replication=False, iters=3, datasets=None, seeds=None):
    """Main private-method comparison (Figure 6, Appendix Figures 14-18).

    Args:
        save_data: Persist results through ExperimentRunner when True.
        filename: Output file stem for saved results.
        replication: When True, load the caller-supplied ``datasets`` with
            ``seeds`` and use the replication data path.
        iters: Repetitions per configuration (replication mode only; the
            default grid always uses 3).
        datasets: Dataset names to load (replication mode only).
        seeds: DataLoader seeds (replication mode only).
    """
    if not replication:
        data_loader = DataLoader(global_seeds)
        datasets = data_loader.load_datasets(["Credit 1", "Credit 2", "adult", "nomao", "Bank", "higgs_sampled"], remove_missing=True, return_dict=True, verbose=True).items()
        iters = 3
    else:
        data_loader = DataLoader(seeds)
        datasets = data_loader.load_datasets(datasets, remove_missing=True, return_dict=True, verbose=True).items()
    # Shared grid settings (identical in both modes).
    num_trees = [25, 50, 75, 100, 125, 150, 175, 200, 225, 250, 275, 300]
    depths = [4]
    hist_sizes = [32]
    epsilons = [0.1, 0.5, 1]
    rf_sizes = [0.25, 1]
    arg_options = [
        # FEVERLESS (private)
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "hist_based", "weight_update_method": "xgboost",
         "sketch_type": "uniform"},
        # DP-GBM
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "hist_based", "weight_update_method": "gbm"},
        # DP-RF
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "training_method": "rf"},
        # DP-EBM (full cycle, GBM)
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "weight_update_method": "gbm",
         "full_ebm": True, "feature_interaction_method": "cyclical", "feature_interaction_k": 1},
        # DP-EBM XGBoost (no full cycle)
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "weight_update_method": "xgboost",
         "feature_interaction_method": "cyclical", "feature_interaction_k": 1},
        # DP-TR XGBoost
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "weight_update_method": "xgboost",},
        # DP-TR XGBoost w/ IH
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "weight_update_method": "xgboost",
         "sketch_type": "adaptive_hessian", "sketch_rounds": 5},
        # DP-TR XGBoost EBM w/IH
        # (a duplicate "sketch_type": "uniform" key was removed here; the
        # later "adaptive_hessian" value always won in the dict literal)
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "weight_update_method": "xgboost", "sketch_type": "adaptive_hessian", "sketch_rounds": 5,
         "feature_interaction_method": "cyclical", "feature_interaction_k": 1},
        # DP-TR Batched boosting
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "sketch_type": "uniform", "training_method": "batched_boosting", "batched_update_size": 30},
        # DP-TR Batched Boosting + IH (s=5)
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "training_method": "batched_boosting", "batched_update_size": 30, "sketch_type": "adaptive_hessian", "sketch_rounds": 5},
        # DP-TR Batched Boosting + IH (s=5) + EBM
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "training_method": "batched_boosting", "batched_update_size": 30, "sketch_type": "adaptive_hessian", "sketch_rounds": 5,
         "feature_interaction_method": "cyclical", "feature_interaction_k": 1},
    ]
    new_arg_options = []
    for d in depths:
        for T in num_trees:
            for args in arg_options:
                new_args = args.copy()
                new_args["max_depth"] = d
                new_args["num_trees"] = T
                new_args["verbose"] = False
                new_args["track_budget"] = False
                # Gradient budgets are forced to the vector mechanism for
                # every configuration in this experiment.
                new_args["gradient_budgets"] = "vector_mechanism"
                if new_args.get("training_method") == "batched_boosting":
                    # Expand batched boosting over all batch-size fractions.
                    for rf_size in rf_sizes:
                        copy_args = new_args.copy()
                        copy_args["batched_update_size"] = math.ceil(rf_size*copy_args["num_trees"])
                        new_arg_options.append(copy_args)
                else:
                    new_arg_options.append(new_args)
    if replication:
        exp = ExperimentRunner(performance=True, data_path=replication_path)
    else:
        exp = ExperimentRunner(performance=False)
    exp.run(datasets, iters, new_arg_options, hist_sizes, save_data=save_data, filename=filename, iterated_param=("epsilon", epsilons))
# Not used in the paper
def full_comparisons_experiment():
    """Extended method comparison including k-way variants (not used in the paper).

    NOTE(review): relies on a module-level ``data_loader``; confirm it is
    initialised before calling.
    """
    exp = ExperimentRunner()
    datasets = data_loader.load_datasets(["Credit 1", "Credit 2", "adult"], remove_missing=True, return_dict=True, verbose=True).items()
    iters = 1
    num_trees = [25, 50, 75, 100, 125, 150, 175, 200, 225, 250, 275, 300]
    depths = [2, 3, 4]
    hist_sizes = [32]
    epsilons = [0.1, 0.5, 1]
    rf_sizes = [0.05, 0.1, 0.25, 0.5, 1]
    arg_options = [ # FEVERLESS (non-private)
                    {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "hist_based", "weight_update_method": "xgboost",
                     "sketch_type": "feverless"},
                    # FEVERLESS (private)
                    {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "hist_based", "weight_update_method": "xgboost",
                     "sketch_type": "uniform"},
                    # DP-GBM
                    {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "hist_based", "weight_update_method": "gbm"},
                    # DP-RF
                    {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "training_method": "rf"},
                    # DP-EBM (full cycle, GBM)
                    {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "weight_update_method": "gbm",
                     "full_ebm": True, "feature_interaction_method": "cyclical", "feature_interaction_k": 1},
                    # DP-EBM XGBoost (no full cycle)
                    {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "weight_update_method": "xgboost",
                     "feature_interaction_method": "cyclical", "feature_interaction_k": 1},
                    # DP-XGBoost 2-way
                    {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "weight_update_method": "xgboost",
                     "feature_interaction_method": "cyclical", "feature_interaction_k": 2},
                    # DP-XGBoost 2-way random
                    {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "weight_update_method": "xgboost",
                     "feature_interaction_method": "random", "feature_interaction_k": 2},
                    # DP-XGBoost 5-way
                    {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "weight_update_method": "xgboost",
                     "feature_interaction_method": "random", "feature_interaction_k": 5},
                    # DP-TR XGBoost
                    {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "weight_update_method": "xgboost",},
                    # DP-TR XGBoost w/ IH
                    {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "weight_update_method": "xgboost",
                     "sketch_type": "adaptive_hessian", "sketch_rounds": 5},
                    # DP-TR Batched boosting
                    {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
                     "sketch_type": "uniform", "track_budget": False, "verbose": False, "training_method": "batched_boosting", "batched_update_size": 30},
                    {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
                     "sketch_type": "uniform", "track_budget": False, "verbose": False, "training_method": "batched_boosting", "batched_update_size": 30, "feature_interaction_method": "cyclical", "feature_interaction_k": 1},
                    # DP-TR Batched Boosting + IH (s=5)
                    # (a duplicate "sketch_type": "uniform" key was removed here;
                    # the later "adaptive_hessian" value always won)
                    {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
                     "track_budget": False, "verbose": False,
                     "training_method": "batched_boosting", "batched_update_size": 30, "sketch_type": "adaptive_hessian", "sketch_rounds": 5},
                  ]
    new_arg_options = []
    for d in depths:
        for T in num_trees:
            for args in arg_options:
                new_args = args.copy()
                new_args["max_depth"] = d
                new_args["num_trees"] = T
                if new_args.get("training_method") == "batched_boosting":
                    # Expand batched boosting over all batch-size fractions.
                    for rf_size in rf_sizes:
                        copy_args = new_args.copy()
                        copy_args["batched_update_size"] = math.ceil(rf_size*copy_args["num_trees"])
                        new_arg_options.append(copy_args)
                else:
                    new_arg_options.append(new_args)
    exp.run(datasets, iters, new_arg_options, hist_sizes, save_data=False, filename="comparisons", iterated_param=("epsilon", epsilons))
# =================== Revisions ===================
# LDP Baselines
def ldp_comparisons(save_data=False, filename="ldp", iters=3, datasets=None, seeds=None):
    """LDP baseline sweep over hist-based and totally-random splits.

    NOTE(review): the ``datasets`` and ``seeds`` parameters are accepted but
    ignored — the paper's dataset list and ``global_seeds`` are always used.
    """
    loader = DataLoader(global_seeds)
    loaded = loader.load_datasets(["Credit 1", "Credit 2", "adult", "nomao", "Bank", "higgs_sampled"], remove_missing=True, return_dict=True, verbose=True).items()
    tree_counts = [25, 50, 75, 100, 125, 150, 175, 200, 225, 250, 275, 300]
    eps_grid = [0.1, 0.5, 1]
    max_depths = [4]
    bin_counts = [32]
    batch_fractions = []
    base_configs = [
        # LDP Hist based
        {"dp_method": "gaussian_ldp", "accounting_method": "rdp_scaled_improved", "split_method": "hist_based", "weight_update_method": "xgboost",
         "sketch_type": "uniform"},
        # LDP TR
        {"dp_method": "gaussian_ldp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "weight_update_method": "xgboost",
         "sketch_type": "uniform"}, ]
    expanded = []
    for depth in max_depths:
        for n_trees in tree_counts:
            for base in base_configs:
                cfg = dict(base)
                cfg["max_depth"] = depth
                cfg["num_trees"] = n_trees
                cfg["verbose"] = False
                cfg["track_budget"] = False
                # Gradient budgets always come from the vector mechanism.
                cfg["gradient_budgets"] = "vector_mechanism"
                if cfg.get("training_method") == "batched_boosting":
                    # Inert for the current base_configs (no training_method set).
                    for frac in batch_fractions:
                        variant = dict(cfg)
                        variant["batched_update_size"] = math.ceil(frac * variant["num_trees"])
                        expanded.append(variant)
                else:
                    expanded.append(cfg)
    runner = ExperimentRunner(performance=True, output_train_monitor=False)
    runner.run(loaded, iters, expanded, bin_counts, save_data=save_data, filename=filename, iterated_param=("epsilon", eps_grid), n_jobs=4)
# Synthetic exp test
def synthetic_test_exp(save_data=False, filename="synthetic_test", iters=3, datasets=None, seeds=None):
    """Run the method comparison on synthetic datasets of increasing size.

    NOTE(review): the ``datasets`` and ``seeds`` parameters are accepted but
    ignored — synthetic datasets are generated from the fixed grid below
    using only ``global_seeds[0]``.
    """
    data_loader = DataLoader([global_seeds[0]])
    num_features = [30]
    num_samples = [1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000]
    num_informative = [26]
    # Merge each synthetic dataset dict into one mapping.
    # (dict union ``|`` requires Python 3.9+)
    datasets = {}
    for n in num_samples:
        for m in num_features:
            for informative in num_informative:
                datasets = datasets | data_loader.load_datasets(["synthetic_n=" + str(n) + "_m=" + str(m) + "_informative=" + str(informative)], remove_missing=True, return_dict=True, verbose=True)
    datasets = datasets.items()
    num_trees = [25, 50, 75, 100, 125, 150, 175, 200, 225, 250, 275, 300]
    depths = [4]
    hist_sizes = [32]
    epsilons = [1]
    # Empty: the batched_boosting expansion below is inert for these configs.
    rf_sizes = []
    arg_options = [
        # FEVERLESS (private)
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "hist_based", "weight_update_method": "xgboost",
         "sketch_type": "uniform"},
        # DP-GBM
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "hist_based", "weight_update_method": "gbm"},
        # DP-RF
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "training_method": "rf"},
        # DP-EBM (full cycle, GBM)
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "weight_update_method": "gbm",
         "full_ebm": True, "feature_interaction_method": "cyclical", "feature_interaction_k": 1},
        # DP-EBM XGBoost (no full cycle)
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "weight_update_method": "xgboost",
         "feature_interaction_method": "cyclical", "feature_interaction_k": 1},
        # DP-TR XGBoost (dp_method is gaussian_cdp, not LDP)
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "weight_update_method": "xgboost",},
    ]
    new_arg_options = []
    for d in depths:
        for T in num_trees:
            for args in arg_options:
                new_args = args.copy()
                new_args["max_depth"] = d
                new_args["num_trees"] = T
                new_args["verbose"] = False
                new_args["track_budget"] = False
                # Gradient budgets are forced to the vector mechanism.
                new_args["gradient_budgets"] = "vector_mechanism"
                if new_args.get("training_method") == "batched_boosting":
                    # Dead branch here: rf_sizes is empty and no config sets
                    # training_method to batched_boosting.
                    for rf_size in rf_sizes:
                        copy_args = new_args.copy()
                        copy_args["batched_update_size"] = math.ceil(rf_size*copy_args["num_trees"])
                        new_arg_options.append(copy_args)
                else:
                    new_arg_options.append(new_args)
    exp = ExperimentRunner(performance=True)
    exp.run(datasets, iters, new_arg_options, hist_sizes, save_data=save_data, filename=filename, iterated_param=("epsilon", epsilons))
# Synthetic communication
def synthetic_communication(save_data=True, filename="synthetic_comm"):
    """Communication-cost sweep on synthetic data of varying dimensionality.

    Loads synthetic datasets for each (num_features, num_informative) pair at
    a fixed sample count and sweeps tree count, depth and histogram size.
    """
    data_loader = DataLoader([global_seeds[0]])
    num_samples = [2500]
    # (total features, informative features) pairs.
    num_features = [(10,6), (20, 12), (30, 20), (40, 30), (50, 40)]
    # Merge each synthetic dataset dict into one mapping (``|`` needs 3.9+).
    datasets = {}
    for n in num_samples:
        for item in num_features:
            m, informative = item
            datasets = datasets | data_loader.load_datasets(["synthetic_n=" + str(n) + "_m=" + str(m) + "_informative=" + str(informative)], remove_missing=True, return_dict=True, verbose=True)
    datasets = datasets.items()
    iters = 3
    num_trees = np.arange(100, 1000, 100)
    depths = [3,4,5,6]
    hist_sizes = [4,8,16,32,64,128]
    rf_sizes = [0.25, 1]
    epsilons = [1]
    arg_options = [
        # FEVERLESS (private)
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "hist_based", "weight_update_method": "xgboost",
         "sketch_type": "uniform"},
        # DP-TR XGBoost w/ IH
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "weight_update_method": "xgboost",
         "sketch_type": "adaptive_hessian", "sketch_rounds": 5},
        # DP-TR XGBoost EBM w/IH
        # (a duplicate "sketch_type": "uniform" key was removed here; the
        # later "adaptive_hessian" value always won in the dict literal)
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "weight_update_method": "xgboost", "sketch_type": "adaptive_hessian", "sketch_rounds": 5,
         "feature_interaction_method": "cyclical", "feature_interaction_k": 1},
        # DP-TR Batched boosting
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "sketch_type": "uniform", "training_method": "batched_boosting", "batched_update_size": 30},
        # DP-TR Batched Boosting + IH (s=5)
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "training_method": "batched_boosting", "batched_update_size": 30, "sketch_type": "adaptive_hessian", "sketch_rounds": 5},
        # DP-TR Batched Boosting + IH (s=5) + EBM
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "training_method": "batched_boosting", "batched_update_size": 30, "sketch_type": "adaptive_hessian", "sketch_rounds": 5,
         "feature_interaction_method": "cyclical", "feature_interaction_k": 1},
        # DP-TR XGBoost
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random", "weight_update_method": "xgboost",},
    ]
    new_arg_options = []
    for d in depths:
        for T in num_trees:
            for q in hist_sizes:
                # NOTE(review): ``q`` is never written into the config (the
                # assignment was commented out in the original), so every
                # configuration is appended once per histogram size, i.e.
                # duplicated len(hist_sizes) times. Kept for behavioural
                # compatibility — confirm whether this duplication is intended.
                for args in arg_options:
                    new_args = args.copy()
                    new_args["max_depth"] = d
                    new_args["num_trees"] = T
                    new_args["verbose"] = False
                    new_args["track_budget"] = False
                    # Gradient budgets are forced to the vector mechanism.
                    new_args["gradient_budgets"] = "vector_mechanism"
                    if new_args.get("training_method") == "batched_boosting":
                        # Expand batched boosting over all batch-size fractions.
                        for rf_size in rf_sizes:
                            copy_args = new_args.copy()
                            copy_args["batched_update_size"] = math.ceil(rf_size*copy_args["num_trees"])
                            new_arg_options.append(copy_args)
                    else:
                        new_arg_options.append(new_args)
    exp = ExperimentRunner(performance=True)
    exp.run(datasets, iters, new_arg_options, hist_sizes, save_data=save_data, filename=filename, iterated_param=("epsilon", epsilons), n_jobs=4)
# Clients
def vary_clients(save_data=True, filename="vary_clients"):
    """Benchmark the best DP-TR variants against non-private XGBoost while
    varying the number of samples (i.e. participating clients) in the
    synthetic datasets.

    Args:
        save_data: Forwarded to ExperimentRunner.run; persist results if True.
        filename: Output filename for the saved results.
    """
    data_loader = DataLoader([global_seeds[0]])
    num_samples = [100, 500, 1000, 2500, 5000, 10000, 20000, 30000, 40000,
                   50000, 60000, 70000, 80000, 90000, 100000]
    # (total features, informative features) for each synthetic dataset.
    num_features = [(30, 26)]
    datasets = {}
    for n in num_samples:
        for m, informative in num_features:
            datasets = datasets | data_loader.load_datasets(
                ["synthetic_n=" + str(n) + "_m=" + str(m) + "_informative=" + str(informative)],
                remove_missing=True, return_dict=True, verbose=True)
    datasets = datasets.items()
    iters = 3
    num_trees = [100]
    depths = [4]
    hist_sizes = [2, 4, 8, 16, 32, 64, 128, 256, 512]
    rf_sizes = [0.25, 1]  # batched-boosting batch sizes as fractions of T
    epsilons = [1]
    # NOTE: a larger arg_options list (and a 12-value num_trees sweep) was
    # previously defined here and immediately overwritten; the dead
    # definitions have been removed. Effective configs are unchanged.
    arg_options = [
        # DP-TR XGBoost w/ IH
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "weight_update_method": "xgboost", "sketch_type": "adaptive_hessian", "sketch_rounds": 5},
        # DP-TR XGBoost
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "weight_update_method": "xgboost"},
        # XGBoost (non-private baseline)
        {"dp_method": "", "split_method": "hist_based", "weight_update_method": "xgboost",
         "sketch_type": "exact_quantiles"},
    ]
    # Expand the cross product of depths x tree counts x base configs.
    new_arg_options = []
    for d in depths:
        for T in num_trees:
            for args in arg_options:
                new_args = args.copy()
                new_args["max_depth"] = d
                new_args["num_trees"] = T
                new_args["verbose"] = False
                new_args["track_budget"] = False
                new_args["gradient_budgets"] = "vector_mechanism"
                if new_args.get("training_method") == "batched_boosting":
                    # One run per batch-size fraction of the total trees.
                    for rf_size in rf_sizes:
                        copy_args = new_args.copy()
                        copy_args["batched_update_size"] = math.ceil(rf_size * copy_args["num_trees"])
                        new_arg_options.append(copy_args)
                else:
                    new_arg_options.append(new_args)
    exp = ExperimentRunner(performance=True)
    exp.run(datasets, iters, new_arg_options, hist_sizes, save_data=save_data, filename=filename, iterated_param=("epsilon", epsilons), n_jobs=4)
# Computation benchmarks
def computation_benchmark(save_data=True, filename="computation_benchmark"):
    """Run computation/runtime benchmarks over synthetic datasets of
    increasing dimensionality for the main DP-GBDT variants.

    Args:
        save_data: Forwarded to ExperimentRunner.run; persist results if True.
        filename: Output filename for the saved results.
    """
    data_loader = DataLoader([global_seeds[0]])
    num_samples = [10000]
    # (total features, informative features) pairs for the synthetic data.
    num_features = [(10, 6), (20, 12), (30, 20), (40, 30), (50, 40)]
    datasets = {}
    for n in num_samples:
        for m, informative in num_features:
            datasets = datasets | data_loader.load_datasets(
                ["synthetic_n=" + str(n) + "_m=" + str(m) + "_informative=" + str(informative)],
                remove_missing=True, return_dict=True, verbose=True)
    datasets = datasets.items()
    iters = 3
    num_trees = [25, 50, 75, 100, 125, 150, 175, 200, 225, 250, 275, 300]
    depths = [3, 4, 5]
    hist_sizes = [16, 32, 64]
    rf_sizes = [0.25, 1]  # batched-boosting batch sizes as fractions of T
    epsilons = [1]
    arg_options = [
        # FEVERLESS (private)
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "hist_based",
         "weight_update_method": "xgboost", "sketch_type": "uniform"},
        # DP-TR XGBoost w/ IH
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "weight_update_method": "xgboost", "sketch_type": "adaptive_hessian", "sketch_rounds": 5},
        # DP-TR XGBoost EBM w/ IH
        # (previously listed "sketch_type" twice in one dict literal; only
        # the last value, "adaptive_hessian", ever took effect)
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "weight_update_method": "xgboost", "sketch_type": "adaptive_hessian", "sketch_rounds": 5,
         "feature_interaction_method": "cyclical", "feature_interaction_k": 1},
        # DP-TR Batched boosting
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "sketch_type": "uniform", "training_method": "batched_boosting", "batched_update_size": 30},
        # DP-TR Batched Boosting + IH (s=5)
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "training_method": "batched_boosting", "batched_update_size": 30, "sketch_type": "adaptive_hessian", "sketch_rounds": 5},
        # DP-TR Batched Boosting + IH (s=5) + EBM
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "training_method": "batched_boosting", "batched_update_size": 30, "sketch_type": "adaptive_hessian", "sketch_rounds": 5,
         "feature_interaction_method": "cyclical", "feature_interaction_k": 1},
        # DP-EBM XGBoost (no full cycle)
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "weight_update_method": "xgboost", "feature_interaction_method": "cyclical", "feature_interaction_k": 1},
        # DP-TR XGBoost
        {"dp_method": "gaussian_cdp", "accounting_method": "rdp_scaled_improved", "split_method": "totally_random",
         "weight_update_method": "xgboost"},
    ]
    # Expand the cross product of depths x tree counts x base configs.
    new_arg_options = []
    for d in depths:
        for T in num_trees:
            for args in arg_options:
                new_args = args.copy()
                new_args["max_depth"] = d
                new_args["num_trees"] = T
                new_args["verbose"] = False
                new_args["track_budget"] = False
                new_args["gradient_budgets"] = "vector_mechanism"
                if new_args.get("training_method") == "batched_boosting":
                    # One run per batch-size fraction of the total trees.
                    for rf_size in rf_sizes:
                        copy_args = new_args.copy()
                        copy_args["batched_update_size"] = math.ceil(rf_size * copy_args["num_trees"])
                        new_arg_options.append(copy_args)
                else:
                    new_arg_options.append(new_args)
    exp = ExperimentRunner(performance=True, output_train_monitor=False)
    exp.run(datasets, iters, new_arg_options, hist_sizes, save_data=save_data, filename=filename, iterated_param=("epsilon", epsilons))
# =================== Run Experiments ===================
# dp_split_methods_with_update_methods()
# non_dp_split_methods_with_update_methods()
# gradient_budget_alloc()
# dp_split_candidate_methods()
# non_dp_split_candidate_methods()
# non_dp_ebm_experiment()
# dp_ebm_experiment()
# feature_interaction_experiments()
# batched_boosting()
# feature_interaction_experiments()
# batched_boosting()
# non_dp_comparisons_experiment()
# comparisons_experiment()
# full_comparisons_experiment()
# ldp_comparisons(True)
# ldp_comparisons(True, filename="ldp_tr")
# synthetic_test_exp(True)
# analytical_stats()
# synthetic_communication()
# vary_clients()
# computation_benchmark()
# quantize_test()
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
import shutil
import matplotlib.lines as mlines
from collections import defaultdict
sns.set_theme(style="whitegrid")
def set_fontsize(size=14):
    """Apply a uniform serif font of *size* points to all matplotlib text."""
    sized_keys = ("axes.labelsize", "font.size", "legend.fontsize",
                  "xtick.labelsize", "ytick.labelsize")
    params = {key: size for key in sized_keys}
    params["font.family"] = "serif"
    plt.rcParams.update(params)
plt.rcParams.update(tex_fonts)
def exp_plot():
    """Draw a constant line y=1 over x in [1, 100] and save it as fig1.eps."""
    xs = np.linspace(1, 100, num=10)
    ys = np.ones(len(xs))
    sns.lineplot(x=xs, y=ys)
    plt.savefig("./paper_plots/fig1.eps")
# Add columns from args for plotting
def process_df_for_plotting(df):
    """Expand the stringified ``args`` column of an experiment-results frame
    into separate categorical columns used as plot hues/facets.

    Extracted columns: dp_method, split_method, max_depth, num_trees,
    epsilon, ebm, feature_interaction_method, feature_interaction_k,
    weight_update, gradient_budgets, batch_size, batch_perc,
    training_method, vec_mech, sketch_type, sketch_rounds, quantize_q.

    NOTE(review): mutates *df* in place and also returns it. The repeated
    ``df[col][mask] = ...`` writes are chained assignments -- they raise
    SettingWithCopyWarning and may silently no-op on newer pandas; confirm
    before upgrading pandas.
    """
    # Normalise the DP method token so later string matching is uniform.
    df["args"] = df["args"].str.replace("'gaussian_cdp", "DP")
    # Each field is recovered by splitting on "'<key>':" then on the next comma.
    dp_method = df["args"].str.split("'dp_method':",expand=True)[1].str.split(",", expand=True)[0].str.strip("'")
    df["dp_method"] = dp_method
    df["split_method"] = df["args"].str.split("'split_method':",expand=True)[1].str.split(",", expand=True)[0].str.strip("'")
    df["split_method"] = df["split_method"].str.replace(" ", "")
    max_depth = df["args"].str.split("'max_depth':",expand=True)[1].str.split(",", expand=True)[0].str.strip("'")
    df["max_depth"] = max_depth.str.replace(" ", "")
    if not "num_trees" in df.columns:
        df["num_trees"] = df["args"].str.split("'num_trees':",expand=True)[1].str.split(",", expand=True)[0].str.strip("'").str.strip(" ")
    if not "epsilon" in df.columns:
        df["epsilon"] = df["args"].str.split("'epsilon':",expand=True)[1].str.split(",", expand=True)[0].str.strip("'").str.strip(" ")
    # Keep only the dataset name before the first underscore.
    df["dataset"] = df["dataset"].str.split("_", expand=True)[0]
    # Feature Interaction...
    df["ebm"] = "False"
    if df["args"].str.contains("ebm").sum() > 0 and df["args"].str.contains("full_ebm").sum() == 0:
        df["ebm"][df["args"].str.contains("ebm")] = df["args"].str.split("'ebm':", expand=True)[1].str.split(",", expand=True)[0]
    df["feature_interaction_method"] = "standard"
    if df["args"].str.contains("feature_interaction_method").sum() > 0:
        df["feature_interaction_method"][df["args"].str.contains("feature_interaction_method")] = df["args"].str.split("'feature_interaction_method':", expand=True)[1].str.split(",", expand=True)[0]
    df["feature_interaction_k"] = "d"
    if df["args"].str.contains("feature_interaction_k").sum() > 0:
        df["feature_interaction_k"][df["args"].str.contains("feature_interaction_k")] = df["args"].str.split("'feature_interaction_k':", expand=True)[1].str.split(",", expand=True)[0]
    df["weight_update"] = ""
    if df["args"].str.contains("weight_update_method").sum() > 0:
        df["weight_update"][df["args"].str.contains("weight_update_method")] = df["args"].str.split("'weight_update_method':",expand=True)[1].str.split(",", expand=True)[0].str.strip("'").str.strip(" ")
    # Runs whose args mention "rf" are labelled as random-forest updates.
    df["weight_update"][df["args"].str.contains("rf")] = "rf"
    df["split_method"] = df["split_method"].str.replace("'", "")
    df["weight_update"] = df["weight_update"].str.replace("'", "")
    df["gradient_budgets"] = ""
    if df["args"].str.contains("gradient_budgets").sum() > 0:
        df["gradient_budgets"][df["args"].str.contains("gradient_budgets")] = df["args"].str.split("'gradient_budgets':",expand=True)[1].str.split(",", expand=True)[0].str.strip("'").str.strip(" ")
    # Batched-boosting runs override batch_size; otherwise it defaults to T.
    df["batch_size"] = df["num_trees"].astype("float32")
    if df["args"].str.contains("batched_update_size").sum() > 0:
        df["batch_size"][df["args"].str.contains("batched_update_size")] = df[df["args"].str.contains("batched_update_size")]["args"].str.split("'batched_update_size':",expand=True)[1].str.split(",", expand=True)[0].str.strip("'")
    df["batch_perc"] = df["batch_size"].astype("float32") / df["num_trees"].astype("float32")
    df["training_method"] = "boosting"
    if df["args"].str.contains("training_method").sum() > 0:
        df["training_method"][df["args"].str.contains("training_method")] = df["args"].str.split("'training_method':",expand=True)[1].str.split(",", expand=True)[0].str.strip("'")
    df["vec_mech"] = "False"
    if df["args"].str.contains("vector_mechanism").sum() > 0:
        df["vec_mech"][df["args"].str.contains("vector_mechanism")] = "True"
    df["sketch_type"] = "uniform"
    if df["args"].str.contains("sketch_type").sum() > 0:
        df["sketch_type"] = df["args"].str.split("'sketch_type':",expand=True)[1].str.split(",", expand=True)[0].str.strip("'")
        # Rows without a sketch_type key split to NaN; default them back.
        df["sketch_type"][df["sketch_type"].isna()] = "uniform"
        df["sketch_type"] = df["sketch_type"].str.replace(" '", " ")
    df["sketch_rounds"] = "inf"
    if df["args"].str.contains("sketch_rounds").sum() > 0:
        df["sketch_rounds"] = df["args"].str.split("'sketch_rounds':",expand=True)[1].str.split(",", expand=True)[0].str.strip("'")
    df["quantize_q"] = "float64"
    if df["args"].str.contains("quantize_q").sum() > 0:
        df["quantize_q"] = df["args"].str.split("'quantize_q':",expand=True)[1].str.split(",", expand=True)[0].str.strip("'")
    return df
def clear_dir(path):
    """Ensure *path* exists as an empty directory, deleting any prior contents."""
    if os.path.exists(path):
        shutil.rmtree(path)
    os.makedirs(path)
# ============================================= E1 + E2 - Split Methods =============================================
# Vary T, D, eps
def plot_split_methods_with_update(in_path="./paper_results/E1_dp_split_methods_with_update_final.csv",
                                   out_path="./paper_plots/", replication=False, dataset="Credit 1", show_dp=True, y_lims=None, legends=None):
    """Produce the E1/E2 split-method comparison plots (vary T, vary depth,
    vary epsilon) for one dataset and save each as a PDF under *out_path*.

    Args:
        in_path: CSV of DP results; merged with the non-DP CSV unless
            *replication* is True.
        out_path: Directory prefix for the emitted .pdf files.
        replication: If True, read only *in_path*.
        dataset: Dataset name to filter on (e.g. "Credit 1").
        show_dp: If True the vary-T plot also includes epsilon==0 (non-DP) rows.
        y_lims: Optional 3-element list of y-axis limits, one per plot.
        legends: Optional 3-element list of booleans toggling each legend.
    """
    if y_lims is None:
        y_lims = [None]*3
    if legends is None:
        legends = [True]*3
    if not replication:
        df1 = pd.read_csv(in_path)
        df2 = pd.read_csv("./paper_results/non_dp_split_methods_with_update.csv")
        df = pd.concat([df1, df2])
        df = df.reset_index()
    else:
        df = pd.read_csv(in_path)
    df = process_df_for_plotting(df)
    # Build a readable method label out of dp_method + update + split method.
    df["args"] = df["dp_method"]
    df["args"] = df["args"].str.replace("'", "")
    df["args"] = df["args"].str.replace(" ", "")
    arg_map = {"DP" : "DP",}
    df = df.replace({"args":arg_map})
    # df["args"] += "-" + df["weight_update"] + " " + df["split_method"] + " " + df["gradient_budgets"]
    df["args"] += "-" + df["weight_update"] + " " + df["split_method"] + " "
    # df = df[df["args"].str.contains("DP")]
    df["args"] = df["args"].str.replace(" ", "")
    arg_map = {"DP-xgboosttotally_random" : "DP-TR Newton",
               "DP-xgboostpartially_random" : "DP-PR Newton",
               "DP-xgboosthist_based" : "DP-Hist Newton",
               "-xgboosttotally_random" : "TR Newton",
               "-xgboostpartially_random" : "PR Newton",
               "-xgboosthist_based" : "Hist Newton",}
    df = df.replace({"args":arg_map})
    # ================= Vary T ==================
    filter_df = df[df["dataset"] == dataset]
    max_depth = "4"
    filter_df = filter_df[filter_df["max_depth"] == max_depth]
    # trees = "200"
    # df = df[df["num_trees"] == trees]
    # epsilon = ""
    epsilon = 1
    # filter_df = filter_df[filter_df["epsilon"] == epsilon]
    if show_dp:
        # epsilon == 0 rows are the non-private baselines.
        filter_df = pd.concat([filter_df[filter_df["epsilon"] == epsilon], filter_df[filter_df["epsilon"] == 0]])
    else:
        filter_df = filter_df[filter_df["epsilon"] == epsilon]
    # cm = sns.color_palette("Blues_r", 10) + sns.color_palette("Reds", 10) + sns.color_palette("Purples",1)
    cm = None
    filter_df = filter_df[filter_df["args"].str.contains("Newton")] # xgb update
    filter_df["Split Method"] = filter_df["args"]
    # Reshape to long format so seaborn can style Train vs Test lines.
    filter_df["Type"] = "Test"
    filter_df["auc"] = filter_df["test_auc"]
    new_df = filter_df[["num_trees", "auc", "Split Method", "Type"]].copy()
    new_df["auc"] = filter_df["train_auc"]
    new_df["Type"] = "Train"
    filter_df = filter_df[["num_trees", "auc", "Split Method", "Type"]]
    plot_df = pd.concat([filter_df, new_df]).reset_index()
    ax = sns.lineplot(data=plot_df, x="num_trees", y="auc", hue="Split Method", style="Type", palette=cm)
    # sns.lineplot(data=filter_df, x="num_trees", y="test_auc", hue="test_args", palette=cm)
    # sns.lineplot(data=filter_df, x="num_trees", y="train_auc", hue="train_args", palette=cm, linestyle="--")
    plt.xlabel("Number of trees (T)")
    plt.ylabel("Test AUC")
    if not legends[0]:
        ax.get_legend().remove()
    plt.tight_layout()
    plt.savefig(out_path + "vary_t_" + dataset + ".pdf")
    #plt.show()
    plt.clf()
    # ================= Vary D ==================
    filter_df = df[df["dataset"] == dataset]
    # max_depth = "4"
    # filter_df = filter_df[filter_df["max_depth"] == max_depth]
    # Hist/PR are shown at 25 trees, TR at 300 trees.
    filter_df_hist = filter_df[filter_df["args"].str.contains("Hist") | filter_df["args"].str.contains("PR") ]
    filter_df_hist = filter_df_hist[filter_df_hist["num_trees"] == "25"]
    filter_df_other = filter_df[filter_df["args"].str.contains("TR")]
    filter_df_other = filter_df_other[filter_df_other["num_trees"] == "300"]
    filter_df = pd.concat([filter_df_other, filter_df_hist])
    # trees = "200"
    # df = df[df["num_trees"] == trees]
    # epsilon = ""
    epsilon = 1
    filter_df = filter_df[filter_df["epsilon"] == epsilon]
    filter_df = filter_df[filter_df["args"].str.contains("Newton")] # xgb update
    filter_df["Split Method"] = filter_df["args"]
    filter_df["Type"] = "Test"
    filter_df["auc"] = filter_df["test_auc"]
    new_df = filter_df[["max_depth", "auc", "Split Method", "Type"]].copy()
    new_df["auc"] = filter_df["train_auc"]
    new_df["Type"] = "Train"
    filter_df = filter_df[["max_depth", "auc", "Split Method", "Type"]]
    plot_df = pd.concat([filter_df, new_df]).reset_index()
    ax = sns.lineplot(data=plot_df, x="max_depth", y="auc", hue="Split Method", style="Type", palette=cm)
    leg = plt.legend( loc = 'lower right')
    if not legends[1]:
        ax.get_legend().remove()
    plt.xlabel("Maximum Depth (d)")
    plt.ylabel("Test AUC")
    plt.ylim(y_lims[1])
    plt.tight_layout()
    plt.savefig(out_path + "vary_D_" + dataset + ".pdf")
    #plt.show()
    plt.clf()
    # ================= Vary eps ==================
    filter_df = df[df["dataset"] == dataset]
    max_depth = "3"
    filter_df = filter_df[filter_df["max_depth"] == max_depth]
    filter_df = filter_df[filter_df["epsilon"] != 0] # Will block non-dp from being plotted
    filter_df_hist = filter_df[filter_df["args"].str.contains("Hist") | filter_df["args"].str.contains("PR") ]
    filter_df_hist = filter_df_hist[filter_df_hist["num_trees"] == "25"]
    filter_df_other = filter_df[filter_df["args"].str.contains("TR")]
    filter_df_other = filter_df_other[filter_df_other["num_trees"] == "300"]
    filter_df = pd.concat([filter_df_other, filter_df_hist])
    cm = None
    filter_df = filter_df[filter_df["args"].str.contains("Newton")] # xgb update
    filter_df["Split Method"] = filter_df["args"]
    filter_df["Type"] = "Test"
    filter_df["auc"] = filter_df["test_auc"]
    new_df = filter_df[["epsilon", "auc", "Split Method", "Type"]].copy()
    new_df["auc"] = filter_df["train_auc"]
    new_df["Type"] = "Train"
    filter_df = filter_df[["epsilon", "auc", "Split Method", "Type"]]
    plot_df = pd.concat([filter_df, new_df]).reset_index()
    # NOTE(review): this legend call and the legends[2] check run *before*
    # the lineplot below, and `ax` still refers to the previous (cleared)
    # axes -- looks like a latent ordering bug; confirm intended behaviour.
    leg = plt.legend( loc = 'lower right')
    if not legends[2]:
        ax.get_legend().remove()
    sns.lineplot(data=plot_df, x="epsilon", y="auc", hue="Split Method", style="Type", palette=cm)
    plt.xlabel("Privacy Budget ($\epsilon$)")
    plt.ylabel("Test AUC")
    plt.savefig(out_path + "vary_e_" + dataset + ".pdf")
    set_fontsize()
    #plt.show()
    plt.clf()
# Displays latex for Table 3
def table_split_methods_with_update(epsilon=0.5, max_depth="4"):
    """Print the LaTeX source for Table 3: mean +- std test AUC per
    dataset / split method / weight update, at one epsilon and one depth,
    with the best update method per (dataset, split method) in bold.

    Args:
        epsilon: Privacy budget to filter results on.
        max_depth: Maximum tree depth (string, matching the CSV column).
    """
    df = pd.read_csv("./paper_results/E1_dp_split_methods_with_update_final.csv")
    # df2 = pd.read_csv("./paper_results/non_dp_split_methods_with_update.csv")
    # df = pd.concat([df1, df2])
    # df = df.reset_index()
    df = process_df_for_plotting(df)
    df["args"] = df["dp_method"]
    df["args"] = df["args"].str.replace("'", "")
    df["args"] = df["args"].str.replace(" ", "")
    arg_map = {"DP" : "DP-XGBoost",}
    df = df.replace({"args":arg_map})
    # df["args"] += df["split_method"] + " " + df["weight_update"] + " " + df["gradient_budgets"]
    df["args"] += df["split_method"] + " " + df["weight_update"]
    # Filter plot params
    # dataset = "Credit 1"
    # df = df[df["dataset"] == dataset]
    #
    df = df[df["max_depth"] == max_depth]
    # trees = "200"
    # df = df[df["num_trees"] == trees]
    # epsilon = ""
    df = df[df["epsilon"] == epsilon]
    # Hist/PR are tabulated at 25 trees, TR at 200 trees.
    filter_df_hist = df[df["args"].str.contains("hist_based") | df["args"].str.contains("partially_random") ]
    filter_df_hist = filter_df_hist[filter_df_hist["num_trees"] == "25"]
    filter_df_other = df[df["args"].str.contains("totally_random")]
    filter_df_other = filter_df_other[filter_df_other["num_trees"] == "200"]
    df = pd.concat([filter_df_other, filter_df_hist])
    df = df[df["args"].str.contains("DP")]
    # print(df.groupby(["weight_update"]).mean()["test_auc"].astype("str") + " +- " + df.groupby(["weight_update"]).std()["test_auc"].astype("str"))
    with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also
        means = df.groupby(["dataset", "split_method", "weight_update"]).mean()["test_auc"].round(4).astype("str")
        sds = df.groupby(["dataset", "split_method", "weight_update"]).std()["test_auc"].round(4).astype("str")
        table = means + " +- " + sds
        max_vals = df.groupby(["dataset", "split_method", "weight_update"]).mean()["test_auc"].round(4).groupby(["dataset", "split_method"]).max().astype("str")
        # print(max_vals)
        # means[means.isin(max_vals)] = "\\textbf{" + means[means.isin(max_vals)] + "}"
        # Bold the best update method within each (dataset, split_method).
        for val in max_vals.values:
            table[means==val] = "\\textbf{" + table[table.str.contains(val)] + "}"
        datasets, column_index, _ = zip(*table.index)
        # print("COLUMN INDEX", _)
        column_index = [["Hist"]*3 + ["PR"]*3 + ["TR"]*3, ["Gradient", "Averaging", "Newton"]*3]
        datasets = list(dict.fromkeys(datasets))
        table = pd.DataFrame(table.values.reshape(len(datasets), -1), columns=column_index, index=datasets)
        # table = pd.DataFrame(table.values.reshape(-1, len(datasets)), columns=datasets, index=column_index)
        print(table)
        print(table.transpose().to_latex(escape=False))
        print("\n")
# Not used...
def table_low_eps_split_methods_with_update():
    """Print mean +- std test AUC grouped by dataset / epsilon / split method /
    weight update across all epsilons (low-epsilon variant of Table 3).

    Marked "Not used" in the source; retained for reference.
    """
    df1 = pd.read_csv("./paper_results/dp_split_methods_with_update_final.csv")
    df2 = pd.read_csv("./paper_results/non_dp_split_methods_with_update.csv")
    df = pd.concat([df1, df2])
    df = df.reset_index()
    df = process_df_for_plotting(df)
    df["args"] = df["dp_method"]
    df["args"] = df["args"].str.replace("'", "")
    df["args"] = df["args"].str.replace(" ", "")
    arg_map = {"DP" : "DP-XGBoost",}
    df = df.replace({"args":arg_map})
    df["args"] += df["split_method"] + " " + df["weight_update"] + " " + df["gradient_budgets"]
    # Filter plot params
    # dataset = "Credit 1"
    # df = df[df["dataset"] == dataset]
    #
    max_depth = "4"
    df = df[df["max_depth"] == max_depth]
    # trees = "200"
    # df = df[df["num_trees"] == trees]
    # epsilon = ""
    # epsilon = 0.5
    # df = df[df["epsilon"] == epsilon]
    # Hist/PR are tabulated at 25 trees, TR at 200 trees.
    filter_df_hist = df[df["args"].str.contains("hist_based") | df["args"].str.contains("partially_random") ]
    filter_df_hist = filter_df_hist[filter_df_hist["num_trees"] == "25"]
    filter_df_other = df[df["args"].str.contains("totally_random")]
    filter_df_other = filter_df_other[filter_df_other["num_trees"] == "200"]
    df = pd.concat([filter_df_other, filter_df_hist])
    df = df[df["args"].str.contains("DP")]
    print(df.groupby(["weight_update"]).mean()["test_auc"].astype("str") + " +- " + df.groupby(["weight_update"]).std()["test_auc"].astype("str"))
    with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also
        means = df.groupby(["dataset", "epsilon", "split_method", "weight_update"]).mean()["test_auc"].round(4).astype("str")
        sds = df.groupby(["dataset","epsilon", "split_method", "weight_update"]).std()["test_auc"].round(4).astype("str")
        table = means + " +- " + sds
        print(table)
    # sns.boxplot(data=df, x="epsilon", y="test_auc", hue="args")
    # plt.show()
# ============================================= E3 - Split Candidate Methods =============================================
def plot_split_candidates(in_path=None, out_path="./paper_plots/", replication=False, dataset="Credit 1", ylim=0.7):
    """Produce the E3 split-candidate comparison plots for one dataset:
    vary epsilon, vary T, vary Q (number of split candidates) and vary s
    (iterative-hessian sketch rounds), each saved as a PDF under *out_path*.

    Args:
        in_path: Results CSV for replication runs; ignored otherwise.
        out_path: Directory prefix for the emitted .pdf files.
        replication: If True read *in_path*, else combine the two paper CSVs.
        dataset: Dataset name to filter on.
        ylim: Lower y-axis limit for the vary-Q plot.
    """
    if not replication:
        df = pd.read_csv("./paper_results/E3_split_candidate_methods_final.csv")
        df2 = pd.read_csv("./paper_results/E3_split_candidate_methods_IH.csv")
        # Replace the adaptive-hessian rows of the first CSV with the IH runs.
        df = df[~df["args"].str.contains("adaptive_hessian")]
        df = pd.concat([df, df2])
    else:
        df = pd.read_csv(in_path)
    df = process_df_for_plotting(df)
    # Label = DP flag + split method + EBM flag, then mapped to paper names.
    df["args"] = df["dp_method"] + df["split_method"] + df["ebm"].astype("str")
    df["args"] = df["args"].str.replace("'", "")
    df["args"] = df["args"].str.replace(" ", "")
    arg_map = {"DPtotally_randomFalse" : "DP-TR Newton",
               "DPtotally_randomTrue" : "DP-TR Newton EBM",
               "DPhist_basedTrue" : "DP-Hist Newton EBM",
               "DPhist_basedFalse" : "DP-Hist Newton",}
    df = df.replace({"args":arg_map})
    df["args"] = df["args"] + " -" + df["sketch_type"] + " rounds:" + df["sketch_rounds"].astype("str")
    # Remove quantile sketching
    df = df[~df["args"].str.contains("feverless")]
    # Debug output of the label values being plotted.
    print(df["args"])
    print(set(df["args"].values))
    # Filter plot params
    # Hist no EBM, no vary rounds
    # df = df[df["args"].str.contains("DP-XGBoost") & (~df["args"].str.contains("EBM")) &
    # (~df["args"].str.contains("TR")) & (df["args"].str.contains("inf"))]
    # Hist no EBM, just vary rounds
    # df = df[df["args"].str.contains("DP-XGBoost") & (~df["args"].str.contains("EBM")) &
    # (~df["args"].str.contains("TR")) & (df["args"].str.contains("adaptive_hessian"))]
    # TR, no EBM, no vary rounds
    # df = df[(df["args"].str.contains("TR")) & ~(df["args"].str.contains("EBM")) & (df["args"].str.contains("inf"))]
    # TR, no EBM, vary rounds
    # df = df[(df["args"].str.contains("TR")) & ~(df["args"].str.contains("EBM")) & (df["args"].str.contains("adaptive_hessian"))]
    # ==================== VARY EPS PLOT ====================
    # TR, no EBM, rounds=5 best one
    filter1 = df[(df["args"].str.contains("TR")) & ~(df["args"].str.contains("EBM")) & (~df["args"].str.contains("adaptive_hessian"))]
    filter2 = df[df["args"].str.contains("adaptive_hessian rounds: 5")]
    filtered_df = pd.concat([filter1,filter2])
    filtered_df = filtered_df.reset_index()
    filtered_df["args"] = filtered_df["sketch_type"]
    arg_map = {" uniform" : "Uniform",
               " log" : "Log",
               " exact_quantiles" : "Quantiles",
               " adaptive_hessian" : "Iterative Hessian (s=5)"}
    filtered_df = filtered_df.replace({"args":arg_map})
    # TR with EBM splits
    # df = df[(df["args"].str.contains("TR")) & (df["args"].str.contains("EBM"))]
    # Hist with EBM splits
    # df = df[~(df["args"].str.contains("TR")) & (df["args"].str.contains("EBM"))]
    filtered_df["num_trees"] = filtered_df["num_trees"].astype("str")
    filtered_df["max_depth"] = filtered_df["max_depth"].astype("str")
    print(set(filtered_df["max_depth"].values))
    print(set(filtered_df["num_trees"].values))
    print(set(filtered_df["dataset"].values))
    filtered_df = filtered_df[filtered_df["dataset"] == dataset]
    max_depth = "4"
    filtered_df = filtered_df[filtered_df["max_depth"] == max_depth]
    trees = "100"
    # trees = "200"
    filtered_df = filtered_df[filtered_df["num_trees"] == trees]
    filtered_df = filtered_df[filtered_df["hist_bin"] == 32]
    # epsilon = ""
    # epsilon = 1
    # df = df[df["epsilon"] == epsilon]
    # cm = sns.color_palette("Blues_r", 10) + sns.color_palette("Reds", 10) + sns.color_palette("Purples",1)
    cm = None
    # sns.barplot(data=df, x="num_trees", y="test_auc", hue="args", palette=cm)
    ax = sns.barplot(data=filtered_df, x="epsilon", y="test_auc", hue="args", palette=cm, ci="sd")
    # plt.title(dataset + "- max_depth:" + max_depth + " epsilon: " + str(epsilon))
    leg = plt.legend( loc = 'lower right')
    # ax.legend(bbox_to_anchor=(1.1, 1.05))
    # plt.tight_layout()
    plt.ylim(0.5)
    plt.xlabel("Privacy Budget ($\epsilon$)")
    plt.ylabel("Test AUC")
    plt.tight_layout()
    plt.savefig(out_path+ "split_candidates_vary_eps_" + dataset+".pdf")
    plt.clf()
    # Same data as a line plot.
    ax = sns.lineplot(data=filtered_df, x="epsilon", y="test_auc", hue="args", palette=cm, ci="sd")
    leg = plt.legend( loc = 'lower right')
    plt.ylim(0.5)
    plt.xlabel("Privacy Budget ($\epsilon$)")
    plt.ylabel("Test AUC")
    plt.tight_layout()
    plt.savefig(out_path+ "split_candidates_vary_eps_" + dataset+"_lineplot.pdf")
    plt.clf()
    # ==================== VARY T PLOT ====================
    filter1 = df[(df["args"].str.contains("TR")) & ~(df["args"].str.contains("EBM")) & (~df["args"].str.contains("adaptive_hessian"))]
    filter2 = df[df["args"].str.contains("adaptive_hessian rounds: 5")]
    filtered_df = pd.concat([filter1,filter2])
    filtered_df = filtered_df.reset_index()
    filtered_df["args"] = filtered_df["sketch_type"]
    arg_map = {" uniform" : "Uniform",
               " log" : "Log",
               " exact_quantiles" : "Quantiles",
               " adaptive_hessian" : "Iterative Hessian (s=5)"}
    filtered_df = filtered_df.replace({"args":arg_map})
    # TR with EBM splits
    # df = df[(df["args"].str.contains("TR")) & (df["args"].str.contains("EBM"))]
    # Hist with EBM splits
    # df = df[~(df["args"].str.contains("TR")) & (df["args"].str.contains("EBM"))]
    filtered_df = filtered_df[filtered_df["dataset"] == dataset]
    max_depth = "4"
    filtered_df = filtered_df[filtered_df["max_depth"] == max_depth]
    # trees = "100"
    filtered_df = filtered_df[filtered_df["num_trees"].isin(["100", "200", "300"])]
    # epsilon = ""
    epsilon = 1
    filtered_df = filtered_df[filtered_df["epsilon"] == epsilon]
    filtered_df = filtered_df[filtered_df["hist_bin"] == 32]
    # cm = sns.color_palette("Blues_r", 10) + sns.color_palette("Reds", 10) + sns.color_palette("Purples",1)
    cm = None
    # sns.barplot(data=df, x="num_trees", y="test_auc", hue="args", palette=cm)
    ax = sns.barplot(data=filtered_df, x="num_trees", y="test_auc", hue="args", palette=cm, ci="sd")
    # plt.title(dataset + "- max_depth:" + max_depth + " epsilon: " + str(epsilon))
    leg = plt.legend( loc = 'lower right')
    # ax.legend(bbox_to_anchor=(1.1, 1.05))
    # plt.tight_layout()
    plt.ylim(0.5)
    plt.xlabel("Number of trees (T)")
    plt.ylabel("Test AUC")
    plt.tight_layout()
    plt.savefig(out_path+ "split_candidates_vary_T_" + dataset+".pdf")
    plt.clf()
    # Zoomed variant (tighter y-axis).
    ax = sns.barplot(data=filtered_df, x="num_trees", y="test_auc", hue="args", palette=cm, ci="sd")
    leg = plt.legend( loc = 'lower right')
    plt.xlabel("Number of trees (T)")
    plt.ylabel("Test AUC")
    plt.ylim(0.65)
    plt.tight_layout()
    plt.savefig(out_path+ "split_candidates_vary_T_" + dataset+"_zoom.pdf")
    plt.clf()
    # Same data as a line plot.
    ax = sns.lineplot(data=filtered_df, x="num_trees", y="test_auc", hue="args", palette=cm, ci="sd")
    leg = plt.legend( loc = 'lower right')
    plt.xlabel("Number of trees (T)")
    plt.ylabel("Test AUC")
    plt.tight_layout()
    plt.savefig(out_path+ "split_candidates_vary_T_" + dataset+"_lineplot.pdf")
    plt.clf()
    #========================= VARY Q =======================
    filter1 = df[(df["args"].str.contains("TR")) & ~(df["args"].str.contains("EBM")) & (~df["args"].str.contains("adaptive_hessian"))]
    filter2 = df[df["args"].str.contains("adaptive_hessian rounds: 5")]
    filtered_df = pd.concat([filter1,filter2])
    filtered_df = filtered_df.reset_index()
    filtered_df["args"] = filtered_df["sketch_type"]
    arg_map = {" uniform" : "Uniform",
               " log" : "Log",
               " exact_quantiles" : "Quantiles",
               " adaptive_hessian" : "Iterative Hessian (s=5)"}
    filtered_df = filtered_df.replace({"args":arg_map})
    # TR with EBM splits
    # df = df[(df["args"].str.contains("TR")) & (df["args"].str.contains("EBM"))]
    # Hist with EBM splits
    # df = df[~(df["args"].str.contains("TR")) & (df["args"].str.contains("EBM"))]
    filtered_df = filtered_df[filtered_df["dataset"] == dataset]
    max_depth = "4"
    filtered_df = filtered_df[filtered_df["max_depth"] == max_depth]
    trees = "100"
    filtered_df = filtered_df[filtered_df["num_trees"] == trees]
    # epsilon = ""
    epsilon = 1
    filtered_df = filtered_df[filtered_df["epsilon"] == epsilon]
    # cm = sns.color_palette("Blues_r", 10) + sns.color_palette("Reds", 10) + sns.color_palette("Purples",1)
    cm = None
    # sns.barplot(data=df, x="num_trees", y="test_auc", hue="args", palette=cm)
    ax = sns.lineplot(data=filtered_df, x="hist_bin", y="test_auc", hue="args", palette=cm, ci="sd", marker="o")
    # plt.title(dataset + "- max_depth:" + max_depth + " epsilon: " + str(epsilon))
    leg = plt.legend( loc = 'lower right')
    # ax.legend(bbox_to_anchor=(1.1, 1.05))
    # plt.tight_layout()
    plt.ylim(ylim)
    plt.xlabel("Number of Split Candidates (Q)")
    plt.ylabel("Test AUC")
    plt.tight_layout()
    plt.savefig(out_path + "/split_candidates_vary_Q_" + dataset + ".pdf")
    #plt.show()
    plt.clf()
    # ==================== VARY s PLOT ====================
    filtered_df = df[(df["args"].str.contains("TR")) & ~(df["args"].str.contains("EBM")) & (df["args"].str.contains("adaptive_hessian"))]
    # TR with EBM splits
    # df = df[(df["args"].str.contains("TR")) & (df["args"].str.contains("EBM"))]
    # Hist with EBM splits
    # df = df[~(df["args"].str.contains("TR")) & (df["args"].str.contains("EBM"))]
    filtered_df = filtered_df[filtered_df["dataset"] == dataset]
    max_depth = "4"
    filtered_df = filtered_df[filtered_df["max_depth"] == max_depth]
    trees = "100"
    filtered_df = filtered_df[filtered_df["num_trees"] == trees]
    filtered_df = filtered_df[filtered_df["hist_bin"] == 32]
    # epsilon = ""
    # epsilon = 1
    # filtered_df = filtered_df[filtered_df["epsilon"] == epsilon]
    # "inf" rounds means sketching every round, i.e. s == T.
    # NOTE(review): chained assignment below -- may no-op on newer pandas.
    filtered_df["sketch_rounds"][filtered_df["sketch_rounds"]==" inf"] = trees
    filtered_df["args"] = "IH (s=" + filtered_df["sketch_rounds"] + ")"
    # NOTE(review): arg_map here is still the vary-Q mapping; none of its
    # keys match the "IH (s=...)" labels, so this replace appears to be a
    # no-op -- confirm.
    filtered_df = filtered_df.replace({"args":arg_map})
    cm = sns.color_palette("Reds_r", 5) + sns.color_palette("Purples",1)
    # sns.barplot(data=df, x="num_trees", y="test_auc", hue="args", palette=cm)
    ax = sns.barplot(data=filtered_df, x="epsilon", y="test_auc", hue="args", palette=cm, ci="sd")
    # plt.title(dataset + "- max_depth:" + max_depth + " epsilon: " + str(epsilon))
    leg = plt.legend( loc = 'lower right')
    plt.xlabel("Privacy Budget ($\epsilon$)")
    plt.ylabel("Test AUC")
    # ax.legend(bbox_to_anchor=(1.1, 1.05))
    # plt.tight_layout()
    plt.ylim(0.5)
    plt.tight_layout()
    plt.savefig(out_path + "split_candidates_vary_s_" + dataset+".pdf")
    #plt.show()
    plt.clf()
    # Same data as a line plot.
    cm = sns.color_palette("Reds_r", 5)
    ax = sns.lineplot(data=filtered_df, x="epsilon", y="test_auc", hue="args", marker="o", palette=cm, ci="sd")
    leg = plt.legend( loc = 'lower right')
    plt.xlabel("Privacy Budget ($\epsilon$)")
    plt.ylabel("Test AUC")
    plt.ylim(0.7)
    plt.tight_layout()
    plt.savefig(out_path + "split_candidates_vary_s_" + dataset+"_lineplot.pdf")
    plt.clf()
def table_split_candidate():
    """Print and emit (as LaTeX) the split-candidate comparison table.

    Reads the E3 experiment results, keeps totally-random (TR) runs plus the
    iterative-Hessian (s=5) runs, and tabulates mean (std) test AUC per
    dataset at depth 4, T=100, epsilon=1, 32 histogram bins. The best method
    per dataset is bolded.
    """
    df = pd.read_csv("./paper_results/E3_split_candidate_methods_final.csv")
    df2 = pd.read_csv("./paper_results/E3_split_candidate_methods_IH.csv")
    # IH runs come from the dedicated IH file; drop any stale ones first.
    df = df[~df["args"].str.contains("adaptive_hessian")]
    df = pd.concat([df, df2])
    df = process_df_for_plotting(df)
    # Needed for ebm rework dataset. Use .loc rather than chained indexing,
    # which raises SettingWithCopyWarning and silently fails under pandas
    # copy-on-write.
    df["ebm"] = False
    df.loc[df["feature_interaction_method"] == "cyclical", "ebm"] = True
    df["args"] = df["dp_method"] + df["split_method"] + df["ebm"].astype("str")
    df["args"] = df["args"].str.replace("'", "")
    df["args"] = df["args"].str.replace(" ", "")
    arg_map = {"DPtotally_randomFalse" : "DP-XGBoost TR",
               "DPtotally_randomTrue" : "DP-XGBoost TR EBM",
               "DPhist_basedTrue" : "DP-XGBoost EBM",
               "DPhist_basedFalse" : "DP-XGBoost",}
    df = df.replace({"args":arg_map})
    df["args"] = df["args"] + " -" + df["sketch_type"] + " rounds:" + df["sketch_rounds"].astype("str") + df["ebm"].astype("str")
    print(set(df["args"].values))
    # Filter plot params: TR without EBM, plus the rounds=5 IH variant.
    df1 = df[(df["args"].str.contains("TR")) & ~(df["args"].str.contains("True")) & (~df["args"].str.contains("adaptive_hessian"))]
    df2 = df[df["args"].str.contains("adaptive_hessian rounds: 5") & (df["args"].str.contains("False"))]
    df = pd.concat([df1, df2])
    df = df.reset_index()
    max_depth = "4"
    df = df[df["max_depth"] == max_depth]
    trees = "100" # old dataset
    df = df[df["num_trees"] == trees]
    epsilon = 1
    df = df[df["epsilon"] == epsilon]
    df = df[df["hist_bin"] == 32]
    df = df[~df["args"].str.contains("feverless")]
    with pd.option_context('display.max_rows', None, 'display.max_columns', None):  # more options can be specified also
        print(df.groupby(["dataset", "args", "num_trees", "epsilon", "max_depth"]).mean()["test_auc"])
        print(df.groupby(["dataset", "args", "num_trees", "epsilon", "max_depth"]).std()["test_auc"])
        # Mean (std) per dataset/method; bold the per-dataset maximum mean.
        means = df.groupby(["dataset", "args"]).mean()["test_auc"].round(4).astype("str")
        max_vals = df.groupby(["dataset", "args"]).mean()["test_auc"].round(4).groupby(["dataset"]).max().astype("str")
        means[means.isin(max_vals)] = "\\textbf{" + means[means.isin(max_vals)] + "}"
        sds = df.groupby(["dataset", "args"]).std()["test_auc"].round(4).astype("str")
        table = means + " (" + sds + ")"
        print(table)
        datasets, args = zip(*table.index)
        # Column order matches the (sorted) groupby order of the args level.
        column_index = ["IH (s=5)", "Quantiles", "Log", "Uniform"]
        datasets = list(dict.fromkeys(datasets))
        print(datasets)
        print(column_index)
        # One row per dataset, one column per split-candidate method.
        table = pd.DataFrame(table.values.reshape(len(datasets), -1), columns=column_index, index=datasets)
        print(table)
        print(table.to_latex(escape=False))
# ============================================= E4 - Feature Interactions =============================================
def plot_k_way(in_path="./paper_results/E4_k_way.csv", out_path="./paper_plots/", replication=False, dataset="Credit 1"):
    """Line plot of test AUC vs number of trees for k-way feature interactions.

    Keeps only DP-TR Newton runs with cyclical feature interactions on the
    given dataset (depth 4, epsilon 1) and saves the figure to `out_path`.

    Args:
        in_path: CSV of E4 k-way experiment results.
        out_path: Directory the PDF figure is written to.
        replication: Unused; kept for interface compatibility.
        dataset: Dataset name to filter on (also used in the file name).
    """
    df = pd.read_csv(in_path)
    df = process_df_for_plotting(df)
    # Fixed: the original had a stray leading "+" after the line continuation
    # (a unary plus applied to a string Series).
    df["args"] = (df["split_method"] + df["sketch_type"]
                  + df["weight_update"] + df["training_method"])
    df["args"] = df["args"].str.replace(" ", "")
    df["args"] = df["args"].str.replace("'", "")
    arg_map = {"totally_randomuniformxgboostboosting" : "DP-TR Newton",
               "totally_randomuniformgbmboosting": "DP-TR Gradient"}
    df = df.replace({"args":arg_map})
    # k=None means "all m features"; render it as m in the legend labels.
    df["feature_interaction_k"] = df["feature_interaction_k"].str.replace("None", "m")
    df["args"] = df["args"] + " " + df["feature_interaction_method"] + " (k="+ df["feature_interaction_k"] + ")"
    df["args"] = df["args"].str.replace("'", "")
    cm = sns.color_palette("Blues_r", 6)
    df = df[df["dataset"] == dataset]
    df = df[df["max_depth"] == "4"]
    df = df[df["epsilon"] == "1"]
    df = df[df["args"].str.contains("cyclical")]
    df = df[df["args"].str.contains("Newton")]
    df = df.sort_values("args")
    ax = sns.lineplot(data=df, x="num_trees", y="test_auc", hue="args", palette=cm)
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles=handles[0:], labels=labels[0:], loc="lower right")
    plt.xlabel("Number of Trees (T)")
    plt.ylabel("Test AUC")
    plt.tight_layout()
    plt.savefig(out_path + "feature_interactions_vary_k_" + dataset+".pdf")
    plt.clf()
def plot_ebm_comparisons(in_path="./paper_results/E4_k_way.csv", out_path="./paper_plots/", replication=False, dataset="Credit 1"):
    """Line plot comparing DP-TR gradient/Newton boosting with/without EBM.

    Filters to totally-random (TR) runs on `dataset` at epsilon=1, depth 4,
    plots test AUC vs number of trees, and saves the figure to `out_path`.

    Args:
        in_path: CSV of E4 k-way experiment results.
        out_path: Directory the PDF figure is written to.
        replication: Unused; kept for interface compatibility.
        dataset: Dataset name to filter on (also used in the file name).
    """
    df = pd.read_csv(in_path)
    df = process_df_for_plotting(df)
    # Needed for ebm rework dataset. .loc avoids chained assignment, which is
    # unreliable (SettingWithCopy) and fails under pandas copy-on-write.
    df["ebm"] = True
    df.loc[df["feature_interaction_method"] == "standard", "ebm"] = False
    df["ebm"] = df["ebm"].astype("str")
    df["args"] = df["split_method"] + df["weight_update"] + df["ebm"]
    df["args"] = df["args"].str.replace(" ", "")
    arg_map = {"totally_randomgbmFalse" : "DP-TR Gradient ($k=m$)",
               "totally_randomgbmTrue" : "DP-TR Gradient EBM ($k=1$)",
               "totally_randomxgboostFalse": "DP-TR Newton ($k=m$)",
               "totally_randomxgboostTrue": "DP-TR Newton EBM ($k=1$)",
               "hist_basedgbmTrue" : "DP-Hist Gradient EBM ($k=1$)",
               "hist_basedxgboostTrue" : "DP-Hist Newton EBM ($k=1$)"}
    df = df.replace({"args":arg_map})
    df = df[df["args"].str.contains("TR")]
    df = df.sort_values("args")
    print("Options:", set(df["dataset"].values),
          set(df["epsilon"].values),
          set(df["max_depth"].values))
    # =================== Vary T ===================
    # Fixed: the dataset filter was hard-coded to "Credit 1" even though the
    # saved file name uses the `dataset` argument; filter on the argument
    # (default unchanged, so existing callers see identical output).
    filtered_df = df[df["dataset"] == dataset]
    filtered_df = filtered_df[filtered_df["epsilon"] == "1"]
    filtered_df = filtered_df[filtered_df["max_depth"] == "4"]
    print(filtered_df)
    cm = None
    ax = sns.lineplot(data=filtered_df, x="num_trees", y="test_auc", hue="args", palette=cm)
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles=handles[0:], labels=labels[0:], loc="lower right")
    ax.set_xlabel("Number of trees (T)")
    ax.set_ylabel("Test AUC")
    plt.tight_layout()
    plt.savefig(out_path+ "dp_ebm_vary_T_" + dataset+".pdf")
    plt.clf()
    with pd.option_context('display.max_rows', None, 'display.max_columns', None):  # more options can be specified also
        print(filtered_df.groupby(["dataset", "epsilon", "args", "num_trees", "max_depth"]).mean()["test_auc"])
# ============================================= E5 - Batched Updates =============================================
def table_low_eps_bb():
    """Print and emit (as LaTeX) the low-epsilon batched-boosting table.

    Reads the E5 RF/boosting results, restricts to Newton-style runs at
    epsilon=0.1, depth 4, T=200 (dropping EBM/gradient variants and the
    0.34/0.75 batch fractions), and tabulates mean (std) test AUC per
    dataset with the per-dataset best bolded. Columns are batch sizes B.
    """
    df = pd.read_csv("./paper_results/E5_rf_boosting_final.csv")
    df = process_df_for_plotting(df)
    # Needed for ebm rework dataset. .loc avoids chained assignment, which is
    # unreliable (SettingWithCopy) and fails under pandas copy-on-write.
    df["ebm"] = True
    df.loc[df["feature_interaction_method"] == "standard", "ebm"] = False
    df["args"] = df["dp_method"] + df["training_method"] + df["ebm"].astype("str") + df["weight_update"] + df["vec_mech"]
    df["args"] = df["args"].str.replace("'", "")
    df["args"] = df["args"].str.replace(" ", "")
    arg_map = {
        "DPbatched_boostingTrueTrue" : "DP-TR Batch Newton EBM (Vec Mech)",
        "DPbatched_boostingFalseTrue" : "DP-TR Batch Newton (Vec Mech)",
        "DPbatched_boostingTrueFalse" : "DP-TR Batch Newton EBM",
        "DPbatched_boostingFalseFalse" : "DP-TR Batch Newton",
        "DPboostingFalsexgboostTrue" : "DP-TR Newton (Vec Mech)",
        "DPboostingFalsegbmTrue" : "DP-TR Gradient",
        "DPboostingTruegbmFalse" : "DP-TR Gradient EBM",
        "DPboostingTruexgboostTrue" : "DP-TR Newton EBM (Vec Mech)",
        "DPboostingTruexgboostFalse" : "DP-TR Newton EBM",
        "DPboostingFalsexgboostFalse" : "DP-TR Newton",
        "DPrfTruerfFalse" : "DP-RF EBM",
        "DPrfTruerfTrue" : "DP-RF EBM (Vec Mech)",
        "DPrfFalserfTrue" : "DP-RF (Vec Mech)",
        "DPrfFalserfFalse" : "DP-RF"}
    df = df.replace({"args":arg_map})
    df["args"] = df["args"] + " " + df["batch_perc"].astype("str")
    df = df.replace({"args":arg_map})
    # Filter table params.
    max_depth = "4"
    df = df[df["max_depth"] == max_depth]
    trees = "200"
    df = df[df["num_trees"] == trees]
    df = df[df["epsilon"] == 0.1]
    # Drop EBM / gradient variants and batch fractions not shown in the table.
    df = df[~df["args"].str.contains("EBM")]
    df = df[~df["args"].str.contains("gbm")]
    df = df[~df["args"].str.contains("Gradient")]
    df = df[~df["args"].str.contains("0.34")]
    df = df[~df["args"].str.contains("0.75")]
    with pd.option_context('display.max_rows', None, 'display.max_columns', None):  # more options can be specified also
        print(df.groupby(["dataset", "args", "num_trees", "epsilon", "max_depth"]).mean()["test_auc"])
        print(df.groupby(["dataset", "args", "num_trees", "epsilon", "max_depth"]).std()["test_auc"])
        means = df.groupby(["dataset", "args"]).mean()["test_auc"].round(4).astype("str")
        sds = df.groupby(["dataset", "args"]).std()["test_auc"].round(4).astype("str")
        # Bold the per-dataset maximum mean AUC.
        max_vals = df.groupby(["dataset", "args"]).mean()["test_auc"].round(4).groupby(["dataset"]).max().astype("str")
        means[means.isin(max_vals)] = "\\textbf{" + means[means.isin(max_vals)] + "}"
        table = means + " (" + sds + ")"
        print(table)
        datasets, args = zip(*table.index)
        percs = [0.05, 0.1, 0.25, 0.5, 1]
        # Column labels: five batch sizes, then DP-RF (one big batch), then
        # standard per-tree Newton (B=1).
        column_index = []
        for i in range(0, len(percs)+2):
            if i <= 4:
                column_index.append("Batch (B=" + str(int(int(trees)*percs[i])) + ")")
            elif i == 5:
                column_index.append("DP-RF (B="+trees +")")
            else:
                column_index.append("Newton (B=1)")
        datasets = list(dict.fromkeys(datasets))
        print(datasets)
        print(column_index)
        table = pd.DataFrame(table.values.reshape(len(datasets), -1), columns=column_index, index=datasets)
        print(table.transpose(), "\n")
        print(table.transpose().to_latex(escape=False))
def plot_low_eps_bb(in_path="./paper_results/E5_rf_boosting_final.csv", out_path="./paper_plots/", replication=False, dataset="Credit 1"):
    """Bar plots of test AUC at low epsilon for batched boosting variants.

    Compares DP-TR Newton, DP-RF, and batched Newton at epsilon in
    {0.1, 0.5} on `dataset` (depth 4, T=200). Saves two figures: one with
    the y-axis floored at 0.5 and one auto-scaled ("_zoom").

    Args:
        in_path: CSV of E5 RF/boosting experiment results.
        out_path: Directory the PDF figures are written to.
        replication: Unused; kept for interface compatibility.
        dataset: Dataset name to filter on (also used in the file names).
    """
    set_fontsize(12)
    df = pd.read_csv(in_path)
    df = process_df_for_plotting(df)
    # Needed for ebm rework dataset. .loc avoids chained assignment, which is
    # unreliable (SettingWithCopy) and fails under pandas copy-on-write.
    df["ebm"] = True
    df.loc[df["feature_interaction_method"] == "standard", "ebm"] = False
    df["args"] = df["dp_method"] + df["training_method"] + df["ebm"].astype("str") + df["weight_update"] + df["vec_mech"]
    df["args"] = df["args"].str.replace("'", "")
    df["args"] = df["args"].str.replace(" ", "")
    arg_map = {
        "DPbatched_boostingTrueTrue" : "DP-TR Batch Newton EBM (Vec Mech)",
        "DPbatched_boostingFalseTrue" : "DP-TR Batch Newton (Vec Mech)",
        "DPbatched_boostingTrueFalse" : "DP-TR Batch Newton EBM",
        "DPbatched_boostingFalseFalse" : "DP-TR Batch Newton",
        "DPboostingFalsexgboostTrue" : "DP-TR Newton (Vec Mech)",
        "DPboostingFalsegbmTrue" : "DP-TR Gradient",
        "DPboostingTruegbmFalse" : "DP-TR Gradient EBM",
        "DPboostingTruexgboostTrue" : "DP-TR Newton EBM (Vec Mech)",
        "DPboostingTruexgboostFalse" : "DP-TR Newton EBM",
        "DPboostingFalsexgboostFalse" : "DP-TR Newton",
        "DPrfTruerfFalse" : "DP-RF EBM",
        "DPrfTruerfTrue" : "DP-RF EBM (Vec Mech)",
        "DPrfFalserfTrue" : "DP-RF (Vec Mech)",
        "DPrfFalserfFalse" : "DP-RF"}
    # Batch fractions not shown in the plots.
    df = df[~df["batch_perc"].astype("str").str.contains("0.34")]
    df = df[~df["batch_perc"].astype("str").str.contains("0.75")]
    df = df.replace({"args":arg_map})
    # Filter plot params.
    df = df[df["dataset"] == dataset]
    max_depth = "4"
    df = df[df["max_depth"] == max_depth]
    trees = "200"
    df = df[df["num_trees"] == trees]
    # Per-tree Newton corresponds to a batch of one tree (B=1).
    df.loc[df["args"].str.contains("DP-TR Newton"), "batch_perc"] = 1/int(trees)
    df["args"] += " (B=" + (df["batch_perc"] * int(trees)).astype("int").astype("str") + ")"
    df = df[df["epsilon"].isin([0.1, 0.5])]
    df = df[~df["args"].str.contains("EBM")]
    df = df[~df["args"].str.contains("Gradient")]
    df = df[~df["args"].str.contains("gbm")]
    # Strip the "(Vec Mech)" suffix. Raw string + regex=True: the parentheses
    # are regex escapes, and str.replace's default flipped to regex=False in
    # pandas 2.0, which would otherwise make this a silent no-op.
    df["args"] = df["args"].str.replace(r"\(Vec Mech\)", "", regex=True)
    cm = sns.color_palette("Blues_r", 1) + sns.color_palette("Greens_r", 1) + sns.color_palette("Reds", 7)
    ax = sns.barplot(data=df, x="epsilon", y="test_auc", hue="args", palette=cm, ci="sd")
    plt.ylim(0.5)
    plt.xlabel(r"Privacy Budget ($\epsilon$)")
    plt.ylabel("Test AUC")
    # Remove df column name from legend.
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles=handles[0:], labels=labels[0:], loc="lower right")
    plt.tight_layout()
    plt.savefig(out_path + "low_eps_bb_" + dataset + ".pdf")
    plt.clf()
    # Second figure: identical bars without the fixed y-axis floor (zoomed).
    ax = sns.barplot(data=df, x="epsilon", y="test_auc", hue="args", palette=cm, ci="sd")
    plt.xlabel(r"Privacy Budget ($\epsilon$)")
    plt.ylabel("Test AUC")
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles=handles[0:], labels=labels[0:], loc="lower right")
    plt.tight_layout()
    plt.savefig(out_path + "low_eps_bb_" + dataset + "_zoom.pdf")
    plt.clf()
# ============================================= E6 - Comparisons =============================================
def plot_comparisons(in_path=None, out_path="./paper_plots/", replication=False, dataset="Credit 1",
                     reduced=False, ylim1=0.5, ylim2=0.82):
    """Two-panel comparison figure (full range + zoomed) of test AUC vs T.

    Combines the main E6 comparison results with the iterative-Hessian and
    LDP runs plus a non-private XGBoost baseline, filters to `dataset` at
    depth 4 and epsilon=1, and saves one PDF with both panels sharing a
    legend on the right.

    Args:
        in_path: CSV of comparison results (used when `replication` is True).
        out_path: Directory the PDF figure is written to.
        replication: If True, read `in_path` instead of the paper results.
        dataset: Dataset name to filter on (also used in the file name).
        reduced: If True, plot a smaller subset of methods.
        ylim1: Lower y-limit for the full-range (left) panel.
        ylim2: Lower y-limit for the zoomed (right) panel.
    """
    set_fontsize(11)
    if not replication:
        df = pd.read_csv("./paper_results/E6_comparisons_fix.csv")
        df2 = pd.read_csv("./paper_results/E6_comparisons_IH.csv")
        # IH / batched rows are superseded by the dedicated IH results file.
        df = df[~df["args"].str.contains("adaptive_hessian")]
        df = df[~df["args"].str.contains("batched_boosting")]
        df = pd.concat([df,df2])
        non_dp_df = pd.read_csv("./paper_results/non_dp_comparisons.csv")
    else:
        df = pd.read_csv(in_path)
        non_dp_df = pd.read_csv("../paper_experiments/paper_results/non_dp_comparisons.csv")
    ldp_df = pd.read_csv("./paper_results/ldp_tr.csv")
    print(ldp_df["args"].unique())
    df = pd.concat([df, ldp_df])
    df = df.reset_index()
    df = process_df_for_plotting(df)
    non_dp_df = process_df_for_plotting(non_dp_df)
    non_dp_df = non_dp_df[non_dp_df["args"].str.contains("exact_quantiles")]
    # Fixed: the original had a stray "+" after the line continuation (a
    # unary plus applied to a string Series).
    df["args"] = (df["dp_method"] + df["split_method"] + df["sketch_type"]
                  + df["weight_update"] + df["training_method"]
                  + df["feature_interaction_method"] + df["feature_interaction_k"])
    df["args"] = df["args"].str.replace(" ", "")
    df["args"] = df["args"].str.replace("'", "")
    non_dp_df["args"] = non_dp_df["args"].str.replace("'", "")
    arg_map = {'DPhist_basedfeverlessxgboostboostingstandardd' : "FEVERLESS (sketch)",
               'DPtotally_randomuniformbatched_boostingcyclical1' : "DP-TR Batch EBM",
               'DPtotally_randomadaptive_hessianbatched_boostingstandardd' : "DP-TR Batch IH",
               'DPtotally_randomuniformgbmboostingcyclical1' : "DP-EBM",
               'DPtotally_randomuniformxgboostboostingrandom2' : "DP-TR Newton (Random k=2)",
               'DPtotally_randomuniformxgboostboostingstandardd' : "DP-TR Newton",
               'gaussian_ldphist_baseduniformxgboostboostingstandardd' : "LDP",
               'gaussian_ldptotally_randomuniformxgboostboostingstandardd' : "LDP-TR",
               'DPtotally_randomadaptive_hessianxgboostboostingstandardd' : "DP-TR Newton IH",
               'DPtotally_randomuniformrfrfstandardd' : "DP-RF",
               'DPtotally_randomuniformbatched_boostingstandardd' : "DP-TR Batch",
               'DPtotally_randomuniformxgboostboostingrandom5' : "DP-TR Newton (Random k=5)",
               'DPhist_baseduniformgbmboostingstandardd' : "DP-GBM",
               'DPtotally_randomuniformxgboostboostingcyclical2' : "DP-TR Newton (Cyclical k=2)",
               'DPtotally_randomuniformxgboostboostingcyclical1' : "DP-EBM Newton",
               'DPhist_baseduniformxgboostboostingstandardd' : "FEVERLESS (uniform)",
               'DPtotally_randomadaptive_hessianbatched_boostingcyclical1': "DP-TR Batch Newton IH EBM",
               'DPtotally_randomadaptive_hessianxgboostboostingcyclical1': "DP-TR Newton IH EBM",
               }
    df = df.replace({"args":arg_map})
    non_dp_df = non_dp_df.replace({"args": arg_map})
    non_dp_df["args"] = "XGBoost (Non-private)"
    # Snap batch percentages to a 0.05 grid and render them as a "(p=...)"
    # suffix. Use .loc (not chained indexing) so assignments reliably stick.
    a = 0.05
    df["batch_perc"] = round(df["batch_perc"]/ a) * a
    df["batch_perc"] = df["batch_perc"].astype("str")
    df.loc[df["batch_perc"] == "0.3", "batch_perc"] = "0.25" # It got rounded up...
    df.loc[df["batch_perc"] == "0.25", "batch_perc"] = "(p=0.25)"
    df.loc[~df["args"].str.contains("Batch"), "batch_perc"] = ""
    df.loc[df["batch_perc"] == "1.0", "batch_perc"] = "(p=1)"
    df["args"] += " " + df["batch_perc"].astype("str")
    df["num_trees"] = df["num_trees"].astype('int')
    print(set(df["args"].values))
    # Methods shown in the figure (legend order follows sorted args).
    filter_list = ["FEVERLESS (uniform)",
                   "DP-GBM",
                   "DP-RF",
                   "DP-EBM",
                   "DP-EBM Newton",
                   "DP-TR Newton IH",
                   "DP-TR Batch Newton IH EBM (p=1)",
                   "DP-TR Batch Newton IH EBM (p=0.25)",
                   "DP-TR Newton IH EBM",
                   "LDP",]
    if reduced:
        filter_list = ["FEVERLESS (uniform)",
                       "DP-RF",
                       "DP-EBM",
                       "DP-EBM Newton",
                       "DP-TR Newton IH",
                       "DP-TR BatchBoost IH 0.25",
                       "DP-TR BatchBoost IH"]
    # Dash patterns per method: baselines dashed, our methods solid.
    styles = {
        "FEVERLESS (uniform)": (4, 1.5),
        "DP-GBM": (4, 1.5),
        "DP-RF": (4, 1.5),
        "DP-EBM": (4, 1.5),
        "DP-EBM Newton": "",
        "DP-TR Newton IH": "",
        "DP-TR Batch Newton IH EBM (p=1)": "",
        "DP-TR Batch Newton IH EBM (p=0.25)": "",
        "DP-TR Newton IH EBM": "",
        "XGBoost (Non-private)": (4, 1.5),
        "LDP": (4, 1.5),
        "LDP-TR": (4, 1.5),
    }
    df["args"] = df["args"].str.strip(" ")
    df = df[df["args"].isin(filter_list)]
    # Filter df to the requested dataset / depth / budget.
    df = df[df["dataset"] == dataset]
    df = df[df["max_depth"] == "4"]
    df = df[df["epsilon"] == 1]
    non_dp_df = non_dp_df[non_dp_df["dataset"] == dataset]
    non_dp_df = non_dp_df[non_dp_df["max_depth"] == "4"]
    df = pd.concat([df, non_dp_df])
    df = df.sort_values("args")
    print(set(df["args"].values))
    print(len(set(df["args"].values)))
    print(df.columns)
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10,5))
    print(set(df["args"].values))
    print(len(filter_list)+1)
    # +1 colour for the non-private baseline, which is drawn in black.
    cm = sns.color_palette("deep", len(filter_list)+1)
    cm[-1] = "black"
    cm[5] = "#933136"
    cm[7] = "#2C760A"
    # Left panel: full y-range.
    sns.lineplot(data=df, x="num_trees", y="test_auc", hue="args", ax=ax1, palette=cm, style="args", dashes=styles)
    ax1.set_ylim(ylim1)
    ax1.set_xlabel("Number of Trees (T)")
    ax1.set_ylabel("Test AUC")
    # Right panel: zoomed y-range.
    sns.lineplot(data=df, x="num_trees", y="test_auc", hue="args", palette=cm, style="args", dashes=styles, ax=ax2)
    ax2.set_ylim(ylim2)
    ax2.set_xlabel("Number of Trees (T)")
    ax2.set_ylabel("Test AUC")
    plt.tight_layout()
    # Replace the per-axes legends with one shared legend on the right.
    handles, labels = ax2.get_legend_handles_labels()
    ax1.get_legend().remove()
    ax2.get_legend().remove()
    from textwrap import fill
    # Wrap long legend labels so the shared legend fits beside the axes.
    labels = [fill(l, 20) for l in labels]
    fig.legend(handles, labels, loc="center right", ncol=1)
    fig.subplots_adjust(right=0.75)
    plt.savefig(out_path + "comparisons_zoom_" + dataset + ".pdf", bbox_inches="tight")
    plt.clf()
def table_comparisons():
    """Print and emit (as LaTeX) the headline method-comparison table.

    For every method/dataset pair (depth 4, epsilon=1), picks the tree count
    T with the best mean test AUC and tabulates mean (std) AUC at that T,
    bolding the per-dataset best method.
    """
    df = pd.read_csv("./paper_results/E6_comparisons_fix.csv")
    df2 = pd.read_csv("./paper_results/E6_comparisons_IH.csv")
    # IH / batched rows are superseded by the dedicated IH results file.
    df = df[~df["args"].str.contains("adaptive_hessian")]
    df = df[~df["args"].str.contains("batched_boosting")]
    df = pd.concat([df,df2])
    df = df.reset_index()
    df = process_df_for_plotting(df)
    # Fixed: the original had a stray "+" after the line continuation (a
    # unary plus applied to a string Series).
    df["args"] = (df["split_method"] + df["sketch_type"]
                  + df["weight_update"] + df["training_method"]
                  + df["feature_interaction_method"] + df["feature_interaction_k"])
    df["args"] = df["args"].str.replace(" ", "")
    df["args"] = df["args"].str.replace("'", "")
    arg_map = {'hist_basedfeverlessxgboostboostingstandardd' : "FEVERLESS (sketch)",
               'totally_randomuniformbatched_boostingcyclical1' : "DP-TR Batch EBM",
               'totally_randomadaptive_hessianbatched_boostingstandardd' : "DP-TR Batch IH",
               'totally_randomuniformgbmboostingcyclical1' : "DP-EBM",
               'totally_randomuniformxgboostboostingrandom2' : "DP-TR Newton (Random k=2)",
               'totally_randomuniformxgboostboostingstandardd' : "DP-TR Newton",
               'totally_randomadaptive_hessianxgboostboostingstandardd' : "DP-TR Newton IH",
               'totally_randomuniformrfrfstandardd' : "DP-RF",
               'totally_randomuniformbatched_boostingstandardd' : "DP-TR Batch",
               'totally_randomuniformxgboostboostingrandom5' : "DP-TR Newton (Random k=5)",
               'hist_baseduniformgbmboostingstandardd' : "DP-GBM",
               'totally_randomuniformxgboostboostingcyclical2' : "DP-TR Newton (Cyclical k=2)",
               'totally_randomuniformxgboostboostingcyclical1' : "DP-EBM Newton",
               'hist_baseduniformxgboostboostingstandardd' : "FEVERLESS (uniform)",
               'totally_randomadaptive_hessianbatched_boostingcyclical1': "DP-TR Batch Newton IH EBM",
               'totally_randomadaptive_hessianxgboostboostingcyclical1': "DP-TR Newton IH EBM",
               }
    df = df.replace({"args":arg_map})
    # Snap batch percentages to a 0.05 grid. Use .loc (not chained indexing)
    # so the assignments reliably take effect.
    a = 0.05
    df["batch_perc"] = round(df["batch_perc"]/ a) * a
    df.loc[df["batch_perc"] == 0.3, "batch_perc"] = 0.25  # It got rounded up
    df.loc[df["batch_perc"] == 1, "batch_perc"] = ""
    print(df["batch_perc"])
    df["args"] += " " + df["batch_perc"].astype("str")
    df["num_trees"] = df["num_trees"].astype('int')
    print(set(df["args"].values))
    filter_list = set(df["args"].values)
    # Filter df.
    df = df[df["max_depth"] == "4"]
    df = df[df["epsilon"] == 1]
    datasets = set(df["dataset"])
    # For each method/dataset pair, keep only the rows at the tree count T
    # that maximises the mean test AUC.
    filtered = []
    for arg in filter_list:
        for dataset in datasets:
            new_filter = df[df["args"] == arg]
            new_filter = new_filter[new_filter["dataset"] == dataset]
            avgs = new_filter.groupby(["num_trees"])["test_auc"].mean()
            # Renamed from `max`, which shadowed the builtin.
            best_pos = avgs.argmax()
            t = avgs.index[best_pos]
            new_filter = new_filter[new_filter["num_trees"] == t]
            filtered.append(new_filter)
    df = pd.concat(filtered)
    df["args"] = df["args"].str.strip(" ")
    df = df.sort_values("args")
    with pd.option_context('display.max_rows', None, 'display.max_columns', None):  # more options can be specified also
        means = df.groupby(["dataset", "args"]).mean()["test_auc"].round(4).astype("str")
        sds = df.groupby(["dataset", "args"]).std()["test_auc"].round(4).astype("str")
        # Bold the per-dataset maximum mean AUC.
        max_vals = df.groupby(["dataset", "args"]).mean()["test_auc"].round(4).groupby(["dataset"]).max().astype("str")
        means[means.isin(max_vals)] = "\\textbf{" + means[means.isin(max_vals)] + "}"
        table = means + " (" + sds + ")" + " T=" + df.groupby(["dataset", "args"]).mean()["num_trees"].astype(int).astype("str")
        print(table)
        datasets, args = zip(*table.index)
        datasets = list(dict.fromkeys(datasets))
        print(datasets)
        # Deduplicate args while preserving their first-seen order.
        unique_args = []
        for arg in args:
            if arg not in unique_args:
                unique_args.append(arg)
        table = pd.DataFrame(table.values.reshape(len(datasets), -1), columns=unique_args, index=datasets)
        print(table.transpose(), "\n")
        print(table.transpose().to_latex(escape=False))
# ============================================= Not Used =============================================
# Not used
def plot_non_dp_ebm(filepath="./paper_plots/", dataset="Credit 1"):
    """Line plot of non-private gradient vs Newton boosting, with/without EBM.

    Reads the E4 non-DP EBM results, filters to `dataset` at depth 4, prints
    the grouped mean AUCs, and saves the figure to `filepath`.

    Args:
        filepath: Directory the PDF figure is written to.
        dataset: Dataset name to filter on (also used in the file name).
    """
    df = pd.read_csv("./paper_results/E4_non_dp_ebm.csv")
    df = process_df_for_plotting(df)
    # Needed for ebm rework dataset. .loc avoids chained assignment, which is
    # unreliable (SettingWithCopy) and fails under pandas copy-on-write.
    df["ebm"] = True
    df.loc[df["feature_interaction_method"] == "standard", "ebm"] = False
    df["ebm"] = df["ebm"].astype("str")
    print("Options:", set(df["dataset"].values), set(df["epsilon"].values), set(df["max_depth"].values))
    depth = '4'
    df = df[df["dataset"] == dataset]
    df = df[df["max_depth"] == depth]
    # Use seaborn's default palette.
    cm = None
    df["args"] = df["split_method"] + df["weight_update"] + df["ebm"]
    df["args"] = df["args"].str.replace(" ", "")
    arg_map = {"totally_randomgbmFalse" : "Gradient",
               "totally_randomgbmTrue" : "Gradient EBM",
               "totally_randomxgboostFalse": "Newton",
               "totally_randomxgboostTrue": "Newton EBM"}
    df = df.replace({"args":arg_map})
    df = df.sort_values("args")
    with pd.option_context('display.max_rows', None, 'display.max_columns', None):  # more options can be specified also
        print(df.groupby(["dataset", "args", "num_trees", "max_depth"]).mean()["test_auc"])
    ax = sns.lineplot(data=df, x="num_trees", y="test_auc", hue="args", ci="sd", palette=cm)
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles=handles[0:], labels=labels[0:], loc="lower right")
    plt.xlabel("Number of trees (T)")
    plt.ylabel("Test AUC")
    plt.tight_layout()
    plt.savefig(filepath+ "non_dp_ebm_vary_T_" + dataset+".pdf")
    plt.clf()
def plot_grad_budgets():
    """Print mean/std test AUC for gradient privacy-budget allocations.

    Combines the gradient-budget results with the split-method results,
    restricts to hist-based XGBoost runs with 10 trees, and prints grouped
    summaries. Despite the name, no figure is produced (the plotting calls
    were disabled); output is printed to stdout.
    """
    df = pd.read_csv("./paper_results/gradient_budget_alloc.csv")
    df2 = pd.read_csv("./paper_results/dp_split_methods_with_update.csv")
    df = pd.concat([df, df2])
    df = df.reset_index()
    df = process_df_for_plotting(df)
    df = df[df["num_trees"] == "10"]
    df = df[df["split_method"] == "hist_based"]
    df["args"] = df["dp_method"]
    df["args"] = df["args"].str.replace("'", "")
    df["args"] = df["args"].str.replace(" ", "")
    arg_map = {"DP" : "DP-XGBoost",}
    df = df.replace({"args":arg_map})
    df["args"] += " " + df["split_method"] + " " + df["weight_update"] + " " + df["gradient_budgets"]
    df = df[df["args"].str.contains("xgboost")]
    # Group once instead of recomputing the identical groupby repeatedly.
    grouped = df.groupby(["dataset", "args", "num_trees", "epsilon", "max_depth"])
    mean_auc = grouped.mean()["test_auc"]
    sd_auc = grouped.std()["test_auc"]
    with pd.option_context('display.max_rows', None, 'display.max_columns', None):  # more options can be specified also
        print(mean_auc)
        print(sd_auc)
    print(df.groupby(["gradient_budgets"]).mean()["test_auc"])
def ebm_top_bottom_plot():
    """Bar plot comparing EBM vs. non-EBM model variants on the APS dataset.

    Reads the ebm_top_bottom experiment CSV, restricts to a single
    dataset / epsilon / depth slice, maps raw argument strings to readable
    labels and draws test AUC vs. number of trees grouped by method.
    """
    data = pd.read_csv("../experiment_data/ebm_top_bottom.csv")
    data = process_df_for_plotting(data)
    print(data["dataset"].unique())

    # Restrict to one dataset / privacy budget / tree depth slice.
    # NOTE(review): epsilon and max_depth are compared as strings — this
    # presumably matches how process_df_for_plotting formats them; verify.
    for column, wanted in (("dataset", "APS"), ("epsilon", '0.01'), ("max_depth", '2')):
        data = data[data[column] == wanted]

    # Build a single "args" key out of the method-configuration columns,
    # then strip quotes/spaces so it can be matched against the label map.
    combined = (data["dp_method"] + data["split_method"] + data["weight_update"]
                + data["ebm"] + data["gradient_budgets"])
    data["args"] = combined.str.replace("'", "").str.replace(" ", "")

    # Raw config string -> human-readable plot label.
    label_map = {"DPtotally_randomxgboostTruevector_mechanism" : "DP-XGBoost EBM (Vector Mech)",
                 "DPtotally_randomxgboostFalsevector_mechanism" : "DP-XGBoost (Vector Mech)",
                 "DPtotally_randomgbmTrue[0.9": "DP-GBM EBM (0.9,0.1)",
                 "DPtotally_randomgbmFalsevector_mechanism": "DP-GBM (Vector Mech)",
                 "DPtotally_randomgbmFalse[0.9": "DP-GBM (0.9,0.1)",
                 "DPhist_basedxgboostTruevector_mechanism": "DP-XGBoost EBM Hist (Vector Mech)",
                 "DPhist_basedgbmTruevector_mechanism": "DP-GBM EBM Hist (Vector Mech)"}
    data = data.replace({"args": label_map})

    sns.barplot(data=data, x="num_trees", y="test_auc", hue="args", ci="sd")
def boosting_rf_plot():
    """Bar plot comparing boosting, random forest and hybrid training methods.

    Filters the batch-fix results to a single dataset / depth / tree-count /
    epsilon slice, keeps only EBM-style ("True") runs, and plots test AUC
    with standard-deviation error bars, printing the grouped mean/std tables.
    """
    results = pd.read_csv("./paper_results/batch_fix_pre_paper.csv")
    results = process_df_for_plotting(results)
    print(results["batch_perc"])

    # Compose the method-identifier key from the configuration columns.
    key = (results["dp_method"] + results["split_method"] + results["training_method"]
           + results["batch_perc"].astype("str") + results["ebm"].astype("str")
           + results["weight_update"])
    results["args"] = key.str.replace("'", "").str.replace(" ", "")

    # Fix the plot to one dataset / depth / tree-count / epsilon slice.
    results = results[results["dataset"] == "Credit 1"]
    results = results[results["max_depth"] == "4"]
    results = results[results["num_trees"] == "100"]
    results = results[results["epsilon"] == 0.5]
    results = results[results["args"].str.contains("True")]

    # Raw config string -> human-readable label.
    label_map = {"DPboosting1.0" : "DP-TR XGBoost",
                 "DPrf1.0" : "DP-TR RF",
                 "DPrf_boosting0.05": "DP-TR Boosting Averages- 5%",
                 "DPrf_boosting0.1": "DP-TR Boosting Averages- 10%",
                 "DPrf_boosting0.25": "DP-TR Boosting Averages- 25%",
                 "DPrf_boosting0.5": "DP-TR Boosting Averages- 50%",
                 "DPrf_boosting0.75": "DP-TR Boosting Averages- 75%",}
    results = results.replace({"args": label_map})

    # Effective palette (an earlier Blues/Oranges/Reds palette was dead code —
    # it was immediately overwritten by this one in the original).
    palette = (sns.color_palette("Blues_r", 3) + sns.color_palette("Purples_r", 2)
               + sns.color_palette("Greens", 7) + sns.color_palette("Reds", 7))
    ax = sns.barplot(data=results, x="epsilon", y="test_auc", hue="args",
                     palette=palette, ci="sd")
    plt.legend(loc='upper right')
    ax.legend(bbox_to_anchor=(1.1, 1.05))  # re-place the legend outside the axes
    plt.tight_layout()

    with pd.option_context('display.max_rows', None, 'display.max_columns', None):
        grouping = ["dataset", "args", "num_trees", "epsilon", "max_depth"]
        print(results.groupby(grouping).mean()["test_auc"])
        print(results.groupby(grouping).std()["test_auc"])
# ============================================= Revision Plots =============================================
def comparison_bubble_plot(in_path=None, out_path="./paper_plots/", replication=False,
                           reduced=False, ylim1=0.5, ylim2=0.82):
    """Strip-plots comparing DP-GBDT variants (plus non-private XGBoost) per dataset.

    Produces two PDFs under ``out_path``:
      * ``bubble_plot_full.pdf``  — every run, one point per result.
      * ``bubble_plot_tree_mean.pdf`` — means grouped by (method, dataset, trees),
        with a few methods excluded for readability.

    Args:
        in_path: CSV to read when ``replication`` is True.
        out_path: Directory for the output PDFs.
        replication: If False, read the fixed paper-result CSVs; otherwise ``in_path``.
        reduced: Use a smaller method filter list.
        ylim1, ylim2: Unused; kept for interface compatibility.
    """
    set_fontsize(11)
    if not replication:
        df = pd.read_csv("./paper_results/E6_comparisons_fix.csv")
        df2 = pd.read_csv("./paper_results/E6_comparisons_IH.csv")
        # IH / batched rows come from the second file; drop stale copies first.
        df = df[~df["args"].str.contains("adaptive_hessian")]
        df = df[~df["args"].str.contains("batched_boosting")]
        df = pd.concat([df, df2])
        non_dp_df = pd.read_csv("./paper_results/non_dp_comparisons.csv")
    else:
        df = pd.read_csv(in_path)
        non_dp_df = pd.read_csv("../paper_experiments/paper_results/non_dp_comparisons.csv")
    df = df.reset_index()
    df = process_df_for_plotting(df)
    non_dp_df = process_df_for_plotting(non_dp_df)
    non_dp_df = non_dp_df[non_dp_df["args"].str.contains("exact_quantiles")]
    # BUG FIX: the original wrote "... + \" followed by "+ df[...]", applying a
    # unary plus to the Series instead of concatenating the columns.
    df["args"] = (df["split_method"] + df["sketch_type"]
                  + df["weight_update"] + df["training_method"]
                  + df["feature_interaction_method"] + df["feature_interaction_k"])
    df["args"] = df["args"].str.replace(" ", "")
    df["args"] = df["args"].str.replace("'", "")
    non_dp_df["args"] = non_dp_df["args"].str.replace("'", "")
    # Raw config string -> human-readable method label.
    arg_map = {'hist_basedfeverlessxgboostboostingstandardd' : "FEVERLESS (sketch)",
               'totally_randomuniformbatched_boostingcyclical1' : "DP-TR Batch EBM",
               'totally_randomadaptive_hessianbatched_boostingstandardd' : "DP-TR Batch IH",
               'totally_randomuniformgbmboostingcyclical1' : "DP-EBM",
               'totally_randomuniformxgboostboostingrandom2' : "DP-TR Newton (Random k=2)",
               'totally_randomuniformxgboostboostingstandardd' : "DP-TR Newton",
               'totally_randomadaptive_hessianxgboostboostingstandardd' : "DP-TR Newton IH",
               'totally_randomuniformrfrfstandardd' : "DP-RF",
               'totally_randomuniformbatched_boostingstandardd' : "DP-TR Batch",
               'totally_randomuniformxgboostboostingrandom5' : "DP-TR Newton (Random k=5)",
               'hist_baseduniformgbmboostingstandardd' : "DP-GBM",
               'totally_randomuniformxgboostboostingcyclical2' : "DP-TR Newton (Cyclical k=2)",
               'totally_randomuniformxgboostboostingcyclical1' : "DP-EBM Newton",
               'hist_baseduniformxgboostboostingstandardd' : "FEVERLESS (uniform)",
               'totally_randomadaptive_hessianbatched_boostingcyclical1': "DP-TR Batch Newton IH EBM",
               'totally_randomadaptive_hessianxgboostboostingcyclical1': "DP-TR Newton IH EBM",
               }
    df = df.replace({"args": arg_map})
    non_dp_df = non_dp_df.replace({"args": arg_map})
    non_dp_df["args"] = "XGBoost (Non-private)"
    # Round batch percentages to the nearest 0.05 and turn them into a
    # "(p=...)" suffix on the method label (empty for non-batch methods).
    # FIX: use .loc instead of chained indexing, which can silently write
    # to a copy (SettingWithCopyWarning).
    a = 0.05
    df["batch_perc"] = round(df["batch_perc"] / a) * a
    df["batch_perc"] = df["batch_perc"].astype("str")
    df.loc[df["batch_perc"] == "0.3", "batch_perc"] = "0.25"  # It got rounded up...
    df.loc[df["batch_perc"] == "0.25", "batch_perc"] = "(p=0.25)"
    df.loc[~df["args"].str.contains("Batch"), "batch_perc"] = ""
    df.loc[df["batch_perc"] == "1.0", "batch_perc"] = "(p=1)"
    df["args"] += " " + df["batch_perc"].astype("str")
    df["num_trees"] = df["num_trees"].astype('int')
    print(set(df["args"].values))
    # Methods to keep in the plot (a superseded duplicate list was removed).
    filter_list = ["FEVERLESS (uniform)",
                   "DP-GBM",
                   "DP-RF",
                   "DP-EBM",
                   "DP-EBM Newton",
                   # "DP-TR Newton",
                   "DP-TR Newton IH",
                   # "DP-TR Batch IH (p=0.25)",
                   # "DP-TR Batch IH (p=1)",
                   "DP-TR Batch Newton IH EBM (p=1)",
                   "DP-TR Batch Newton IH EBM (p=0.25)",
                   "DP-TR Newton IH EBM"]
    if reduced:
        filter_list = ["FEVERLESS (uniform)",
                       "DP-RF",
                       "DP-EBM",
                       "DP-EBM Newton",
                       "DP-TR Newton IH",
                       "DP-TR BatchBoost IH 0.25",
                       "DP-TR BatchBoost IH"]
    # Marker per method for the grouped-mean plot.
    markers = {
        "FEVERLESS (uniform)": "o",
        "DP-GBM": "o",
        "DP-RF": "o",
        "DP-EBM": "o",
        "DP-EBM Newton": "v",
        "DP-TR Newton IH": "v",
        "DP-TR Batch Newton IH EBM (p=1)": "v",
        "DP-TR Batch Newton IH EBM (p=0.25)": "v",
        "DP-TR Newton IH EBM": "v",
        "XGBoost (Non-private)": "*"
    }
    dataset_filter = ["adult", "Credit 1", "Credit 2", "higgs-sample"]
    print(df["dataset"].unique())
    df["args"] = df["args"].str.strip(" ")
    df = df[df["args"].isin(filter_list)]
    df = df[df["dataset"].isin(dataset_filter)]
    non_dp_df = non_dp_df[non_dp_df["dataset"].isin(dataset_filter)]
    # Fix depth / epsilon slice; the non-private baseline has no epsilon.
    df = df[df["max_depth"] == "4"]
    df = df[df["epsilon"] == 0.5]
    non_dp_df = non_dp_df[non_dp_df["max_depth"] == "4"]
    df = pd.concat([df, non_dp_df])
    df = df.sort_values("args")
    print(set(df["args"].values))
    print(len(set(df["args"].values)))
    print(df.columns)
    print(set(df["args"].values))
    print(len(filter_list) + 1)
    # Palette: one colour per method plus black for the non-private baseline.
    cm = sns.color_palette("deep", len(filter_list) + 1)
    cm[-1] = "black"
    cm[5] = "#933136"
    cm[7] = "#2C760A"
    # Colour lookup keyed on the (sorted) unique method labels.
    cm_map = {df["args"].unique()[i]: col for i, col in enumerate(cm)}
    print(cm_map)
    df['dataset'] = df['dataset'].replace(['nomao'], 'Nomao')
    df['dataset'] = df['dataset'].replace(['higgs-sample'], 'Higgs')
    df['dataset'] = df['dataset'].replace(['adult'], 'Adult')
    df = df.sort_values(["args", "dataset"])
    # Full plot: every run as one jittered point.
    ax = sns.stripplot(data=df, x="dataset", y="test_auc", hue="args", palette=cm, jitter=0.5)
    plt.tight_layout()
    ax.legend(loc='upper left', bbox_to_anchor=(1, 0.5))
    ax.set_xlabel("Dataset")
    ax.set_ylabel("Test AUC")
    plt.savefig(out_path + "bubble_plot_full.pdf", bbox_inches="tight")
    plt.clf()
    # Second plot: mean per (method, dataset, trees), some methods excluded.
    group = df.groupby(by=["args", "dataset", "num_trees"]).mean().reset_index()
    exclude_list = ["DP-TR Batch Newton IH EBM (p=1)", "DP-TR Newton IH", 'DP-EBM Newton']
    for arg in exclude_list:
        group = group[group["args"] != arg]
    ax = None
    plt.figure(figsize=(6, 5))
    for i, arg in enumerate(group["args"].unique()):
        filter_df = group[group["args"] == arg]
        ax = sns.stripplot(data=filter_df, x="dataset", y="test_auc", color=cm_map[arg],
                           marker=markers[arg], jitter=0.2, ax=ax, alpha=0.75)
    plt.tight_layout()
    # Build the legend by hand so each method shows its marker + colour.
    handles = []
    for i, arg in enumerate(group["args"].unique()):
        handles.append(mlines.Line2D([], [], color=cm_map[arg], marker=markers[arg],
                                     linestyle='None', markersize=5, label=arg))
    legend = plt.legend(handles=handles, loc='upper center', bbox_to_anchor=(0.5, 1.1), ncol=2)
    legend = legend.get_frame().set_alpha(None)
    plt.ylim(0.52, 0.98)
    ax.set_xlabel("Dataset")
    ax.set_ylabel("Test AUC")
    plt.savefig(out_path + "bubble_plot_tree_mean.pdf", bbox_inches="tight")
    plt.clf()
def rank_table(in_path=None, replication=False,
               reduced=False):
    """Print LaTeX tables of per-epsilon average method ranks (mean and max AUC).

    For every (epsilon, dataset) slice, methods are ranked by their mean and
    by their best test AUC; ranks are averaged over datasets, the best
    (lowest) rank per epsilon column is bolded, and both tables are printed
    as LaTeX.

    Args:
        in_path: CSV to read when ``replication`` is True.
        replication: If False, read the fixed paper-result CSVs; otherwise ``in_path``.
        reduced: Use a smaller method filter list.
    """
    set_fontsize(11)
    if not replication:
        df = pd.read_csv("./paper_results/E6_comparisons_fix.csv")
        df2 = pd.read_csv("./paper_results/E6_comparisons_IH.csv")
        # IH / batched rows come from the second file; drop stale copies first.
        df = df[~df["args"].str.contains("adaptive_hessian")]
        df = df[~df["args"].str.contains("batched_boosting")]
        df = pd.concat([df, df2])
        non_dp_df = pd.read_csv("./paper_results/non_dp_comparisons.csv")
    else:
        df = pd.read_csv(in_path)
        non_dp_df = pd.read_csv("../paper_experiments/paper_results/non_dp_comparisons.csv")
    df = df.reset_index()
    df = process_df_for_plotting(df)
    non_dp_df = process_df_for_plotting(non_dp_df)
    non_dp_df = non_dp_df[non_dp_df["args"].str.contains("exact_quantiles")]
    # BUG FIX: the original wrote "... + \" followed by "+ df[...]", applying a
    # unary plus to the Series instead of concatenating the columns.
    df["args"] = (df["split_method"] + df["sketch_type"]
                  + df["weight_update"] + df["training_method"]
                  + df["feature_interaction_method"] + df["feature_interaction_k"])
    df["args"] = df["args"].str.replace(" ", "")
    df["args"] = df["args"].str.replace("'", "")
    non_dp_df["args"] = non_dp_df["args"].str.replace("'", "")
    # Raw config string -> human-readable method label.
    arg_map = {'hist_basedfeverlessxgboostboostingstandardd' : "FEVERLESS (sketch)",
               'totally_randomuniformbatched_boostingcyclical1' : "DP-TR Batch EBM",
               'totally_randomadaptive_hessianbatched_boostingstandardd' : "DP-TR Batch IH",
               'totally_randomuniformgbmboostingcyclical1' : "DP-EBM",
               'totally_randomuniformxgboostboostingrandom2' : "DP-TR Newton (Random k=2)",
               'totally_randomuniformxgboostboostingstandardd' : "DP-TR Newton",
               'totally_randomadaptive_hessianxgboostboostingstandardd' : "DP-TR Newton IH",
               'totally_randomuniformrfrfstandardd' : "DP-RF",
               'totally_randomuniformbatched_boostingstandardd' : "DP-TR Batch",
               'totally_randomuniformxgboostboostingrandom5' : "DP-TR Newton (Random k=5)",
               'hist_baseduniformgbmboostingstandardd' : "DP-GBM",
               'totally_randomuniformxgboostboostingcyclical2' : "DP-TR Newton (Cyclical k=2)",
               'totally_randomuniformxgboostboostingcyclical1' : "DP-EBM Newton",
               'hist_baseduniformxgboostboostingstandardd' : "FEVERLESS (uniform)",
               'totally_randomadaptive_hessianbatched_boostingcyclical1': "DP-TR Batch Newton IH EBM",
               'totally_randomadaptive_hessianxgboostboostingcyclical1': "DP-TR Newton IH EBM",
               }
    df = df.replace({"args": arg_map})
    non_dp_df = non_dp_df.replace({"args": arg_map})
    non_dp_df["args"] = "XGBoost (Non-private)"
    # Round batch percentages to the nearest 0.05 and turn them into a
    # "(p=...)" suffix on the method label (empty for non-batch methods).
    # FIX: use .loc instead of chained indexing, which can silently write
    # to a copy (SettingWithCopyWarning).
    a = 0.05
    df["batch_perc"] = round(df["batch_perc"] / a) * a
    df["batch_perc"] = df["batch_perc"].astype("str")
    df.loc[df["batch_perc"] == "0.3", "batch_perc"] = "0.25"  # It got rounded up...
    df.loc[df["batch_perc"] == "0.25", "batch_perc"] = "(p=0.25)"
    df.loc[~df["args"].str.contains("Batch"), "batch_perc"] = ""
    df.loc[df["batch_perc"] == "1.0", "batch_perc"] = "(p=1)"
    df["args"] += " " + df["batch_perc"].astype("str")
    df["num_trees"] = df["num_trees"].astype('int')
    print(set(df["args"].values))
    # Methods to keep (a superseded duplicate list and unused styles/markers
    # dicts were removed from the original).
    filter_list = ["FEVERLESS (uniform)",
                   "DP-GBM",
                   "DP-RF",
                   "DP-EBM",
                   "DP-EBM Newton",
                   # "DP-TR Newton",
                   "DP-TR Newton IH",
                   # "DP-TR Batch IH (p=0.25)",
                   # "DP-TR Batch IH (p=1)",
                   "DP-TR Batch Newton IH EBM (p=1)",
                   "DP-TR Batch Newton IH EBM (p=0.25)",
                   "DP-TR Newton IH EBM"]
    if reduced:
        filter_list = ["FEVERLESS (uniform)",
                       "DP-RF",
                       "DP-EBM",
                       "DP-EBM Newton",
                       "DP-TR Newton IH",
                       "DP-TR BatchBoost IH 0.25",
                       "DP-TR BatchBoost IH"]
    df["args"] = df["args"].str.strip(" ")
    df = df[df["args"].isin(filter_list)]
    df = df.sort_values("args")
    print(set(df["args"].values))
    print(len(set(df["args"].values)))
    print(df.columns)
    print(set(df["args"].values))
    print(len(filter_list) + 1)
    df['dataset'] = df['dataset'].replace(['nomao'], 'Nomao')
    df['dataset'] = df['dataset'].replace(['higgs-sample'], 'Higgs')
    df['dataset'] = df['dataset'].replace(['adult'], 'Adult')

    def bold_minimum(rows):
        """Wrap the best (lowest) average rank of a column in \\textbf{}."""
        if not rows:
            return
        best = min(range(len(rows)), key=lambda j: float(rows[j][2]))
        rows[best][2] = "\\textbf{" + str(rows[best][2]) + "}"

    data, max_data = [], []
    for eps in df["epsilon"].unique():
        col_data, col_max_data = [], []
        arg_rank_map = defaultdict(list)
        max_arg_rank_map = defaultdict(list)
        for dataset in df["dataset"].unique():
            print(f"Dataset {dataset}")
            temp_df = df[df["epsilon"] == eps]
            temp_df = temp_df[temp_df["dataset"] == dataset]
            # Rank methods by mean and by best test AUC (rank 1 = highest AUC).
            group_rank = temp_df[["args", "test_auc"]].groupby("args").mean().rank(ascending=False).reset_index()
            max_rank = temp_df[["args", "test_auc"]].groupby("args").max().rank(ascending=False).reset_index()
            print(group_rank)
            for arg in group_rank["args"].unique():
                rank = group_rank[group_rank["args"] == arg]["test_auc"].values[0]
                arg_rank_map[arg].append(rank)
            for arg in max_rank["args"].unique():
                rank = max_rank[max_rank["args"] == arg]["test_auc"].values[0]
                max_arg_rank_map[arg].append(rank)
        # Average ranks over datasets for this epsilon column.
        for arg, ranks in arg_rank_map.items():
            col_data.append([arg, eps, str(round(np.mean(ranks), 2))])
        for arg, ranks in max_arg_rank_map.items():
            col_max_data.append([arg, eps, str(round(np.mean(ranks), 2))])
        # Bold best (lowest) rank in each column.
        # BUG FIX: the original reused min_data_rank/min_rank_val from the
        # mean-rank scan without resetting them, so the max-rank table could
        # bold a wrong entry (or an entry belonging to the other list).
        bold_minimum(col_data)
        bold_minimum(col_max_data)
        data.extend(col_data)
        max_data.extend(col_max_data)
    rank_df = pd.DataFrame(data, columns=["args", "eps", "rank"])
    max_rank_df = pd.DataFrame(max_data, columns=["args", "eps", "rank"])
    # Pivot to methods x epsilon tables of (already formatted) rank strings.
    mean_rank_table = rank_df.groupby(["args", "eps"]).agg(lambda x: x).unstack(level=1)
    max_rank_table = max_rank_df.groupby(["args", "eps"]).agg(lambda x: x).unstack(level=1)
    with pd.option_context('display.max_rows', None, 'display.max_columns', None):
        print(mean_rank_table)
        print(max_rank_table)
    print(mean_rank_table.to_latex(escape=False))
    print(max_rank_table.to_latex(escape=False))
def synthetic_comm(out_path="./paper_plots/"):
    """Plot total client communication cost vs. number of trees per method.

    Reads the synthetic communication benchmark CSV, converts payload sizes
    to MiB, and writes ``comm.pdf`` (log scale, with min/max bands) and
    ``comm_ZOOM.pdf`` (linear, zoomed) to ``out_path``.
    """
    set_fontsize(11)
    df = pd.read_csv("./paper_results/synthetic_comm.csv")
    df["args"] = df["args"].astype("str")
    print(df)
    print(df.columns)
    df = process_df_for_plotting(df)
    print("df processed...")
    # BUG FIX: the original wrote "... + \" followed by "+ df[...]", applying a
    # unary plus to the Series instead of concatenating the columns.
    df["args"] = (df["dp_method"] + df["split_method"] + df["sketch_type"]
                  + df["weight_update"] + df["training_method"]
                  + df["feature_interaction_method"] + df["feature_interaction_k"])
    df["args"] = df["args"].str.replace(" ", "")
    df["args"] = df["args"].str.replace("'", "")
    # Raw config string -> human-readable method label.
    arg_map = {'DPhist_basedfeverlessxgboostboostingstandardd' : "FEVERLESS (sketch)",
               'DPtotally_randomuniformbatched_boostingcyclical1' : "DP-TR Batch EBM",
               'DPtotally_randomadaptive_hessianbatched_boostingstandardd' : "DP-TR Batch IH",
               'DPtotally_randomuniformgbmboostingcyclical1' : "DP-EBM",
               'DPtotally_randomuniformxgboostboostingrandom2' : "DP-TR Newton (Random k=2)",
               'DPtotally_randomuniformxgboostboostingstandardd' : "DP-TR Newton",
               'gaussian_ldphist_baseduniformxgboostboostingstandardd' : "LDP",
               'gaussian_ldptotally_randomuniformxgboostboostingstandardd' : "LDP-TR",
               'DPtotally_randomadaptive_hessianxgboostboostingstandardd' : "DP-TR Newton IH",
               'DPtotally_randomuniformrfrfstandardd' : "DP-RF",
               'DPtotally_randomuniformbatched_boostingstandardd' : "DP-TR Batch",
               'DPtotally_randomuniformxgboostboostingrandom5' : "DP-TR Newton (Random k=5)",
               'DPhist_baseduniformgbmboostingstandardd' : "DP-GBM",
               'DPtotally_randomuniformxgboostboostingcyclical2' : "DP-TR Newton (Cyclical k=2)",
               'DPtotally_randomuniformxgboostboostingcyclical1' : "DP-EBM Newton",
               'DPhist_baseduniformxgboostboostingstandardd' : "FEVERLESS (uniform)",
               'DPtotally_randomadaptive_hessianbatched_boostingcyclical1': "DP-TR Batch Newton IH EBM",
               'DPtotally_randomadaptive_hessianxgboostboostingcyclical1': "DP-TR Newton IH EBM",
               }
    df = df.replace({"args": arg_map})
    # Round batch percentages to the nearest 0.05 and turn them into a
    # "(p=...)" suffix on the method label (empty for non-batch methods).
    # FIX: use .loc instead of chained indexing, which can silently write
    # to a copy (SettingWithCopyWarning).
    a = 0.05
    df["batch_perc"] = round(df["batch_perc"] / a) * a
    df["batch_perc"] = df["batch_perc"].astype("str")
    df.loc[df["batch_perc"] == "0.3", "batch_perc"] = "0.25"  # It got rounded up...
    df.loc[df["batch_perc"] == "0.25", "batch_perc"] = "(p=0.25)"
    df.loc[~df["args"].str.contains("Batch"), "batch_perc"] = ""
    df.loc[df["batch_perc"] == "1.0", "batch_perc"] = "(p=1)"
    df["args"] += " " + df["batch_perc"].astype("str")
    df["num_trees"] = df["num_trees"].astype('int')
    # FIX: original printed the unbound `.unique` method (missing parentheses).
    print(f"Unique args {df['args'].unique()}")
    # Methods to keep (superseded duplicate lists and unused styles/markers
    # dicts were removed from the original).
    filter_list = ["FEVERLESS (uniform)",
                   "DP-GBM",
                   "DP-RF",
                   "DP-EBM",
                   "DP-EBM Newton",
                   "DP-TR Newton",
                   "DP-TR Newton IH",
                   "DP-TR Newton IH EBM"]
    df["args"] = df["args"].str.strip(" ")
    df = df[df["args"].isin(filter_list)]
    df = df.sort_values("args")
    print(set(df["args"].values))
    print(len(set(df["args"].values)))
    print(df.columns)
    print(df["max_depth"].unique())
    cm = sns.color_palette("deep", len(filter_list) + 1)
    # Bytes -> MiB for the y axis.
    df["total_client_rounds_sent_payload"] = df["total_client_rounds_sent_payload"] / (1024 ** 2)
    # Mean line per method plus a min/max band per method.
    ax = sns.lineplot(data=df, x="num_trees", y="total_client_rounds_sent_payload", hue="args", ci=None)
    for i, arg in enumerate(df["args"].unique()):
        filter_df = df[df["args"] == arg].sort_values("num_trees")
        max_df = df[df["args"] == arg].groupby("num_trees").max()["total_client_rounds_sent_payload"]
        min_df = df[df["args"] == arg].groupby("num_trees").min()["total_client_rounds_sent_payload"]
        ax.fill_between(filter_df["num_trees"].unique(), min_df.values, max_df.values, color=cm[i], alpha=0.2)
    plt.ylabel("Total communication cost (Mb)")
    plt.xlabel("Number of trees (T)")
    ax.set_yscale("log")
    leg = plt.legend(loc=(0.05, 1.05), title="Method", ncol=2)
    ax.add_artist(leg)
    plt.savefig(out_path + "comm.pdf", bbox_inches="tight")
    plt.clf()
    # Zoomed linear-scale version of the same plot.
    ax = sns.lineplot(data=df, x="num_trees", y="total_client_rounds_sent_payload", hue="args")
    plt.ylabel("Total communication cost (Mb)")
    plt.ylim(0, 0.5)
    plt.savefig(out_path + "comm_ZOOM.pdf", bbox_inches="tight")
    plt.clf()
    print(df.groupby(["num_trees", "args"]).max()["total_client_rounds_sent_payload"])
def vary_clients(out_path="./paper_plots/"):
    """Plot test AUC vs. number of clients (n) for selected methods and bin counts.

    Combines the two vary_clients result CSVs, keeps DP-TR Newton (+IH) and
    non-private XGBoost at 100 trees, draws one line per (method, Q) with an
    inset zoom, and saves ``vary_clients.pdf`` to ``out_path``.
    """
    set_fontsize(11)
    df1 = pd.read_csv("./paper_results/vary_clients3.csv")
    df2 = pd.read_csv("./paper_results/vary_clients4.csv")
    df = pd.concat([df1, df2])
    df = df.reset_index()
    df["args"] = df["args"].astype("str")
    print(df)
    print(df.columns)
    df = process_df_for_plotting(df)
    print("df processed...")
    # BUG FIX: the original wrote "... + \" followed by "+ df[...]", applying a
    # unary plus to the Series instead of concatenating the columns.
    df["args"] = (df["dp_method"] + df["split_method"] + df["sketch_type"]
                  + df["weight_update"] + df["training_method"]
                  + df["feature_interaction_method"] + df["feature_interaction_k"])
    df["args"] = df["args"].str.replace(" ", "")
    df["args"] = df["args"].str.replace("'", "")
    print(df["n"].unique())
    # Raw config string -> human-readable method label.
    arg_map = {'DPhist_basedfeverlessxgboostboostingstandardd' : "FEVERLESS (sketch)",
               'DPtotally_randomuniformbatched_boostingcyclical1' : "DP-TR Batch EBM",
               'DPtotally_randomadaptive_hessianbatched_boostingstandardd' : "DP-TR Batch IH",
               'DPtotally_randomuniformgbmboostingcyclical1' : "DP-EBM",
               'DPtotally_randomuniformxgboostboostingrandom2' : "DP-TR Newton (Random k=2)",
               'DPtotally_randomuniformxgboostboostingstandardd' : "DP-TR Newton",
               'gaussian_ldphist_baseduniformxgboostboostingstandardd' : "LDP",
               'gaussian_ldptotally_randomuniformxgboostboostingstandardd' : "LDP-TR",
               'DPtotally_randomadaptive_hessianxgboostboostingstandardd' : "DP-TR Newton IH",
               'DPtotally_randomuniformrfrfstandardd' : "DP-RF",
               'DPtotally_randomuniformbatched_boostingstandardd' : "DP-TR Batch",
               'DPtotally_randomuniformxgboostboostingrandom5' : "DP-TR Newton (Random k=5)",
               'DPhist_baseduniformgbmboostingstandardd' : "DP-GBM",
               'DPtotally_randomuniformxgboostboostingcyclical2' : "DP-TR Newton (Cyclical k=2)",
               'DPtotally_randomuniformxgboostboostingcyclical1' : "DP-EBM Newton",
               'DPhist_baseduniformxgboostboostingstandardd' : "FEVERLESS (uniform)",
               'DPtotally_randomadaptive_hessianbatched_boostingcyclical1': "DP-TR Batch Newton IH EBM",
               'DPtotally_randomadaptive_hessianxgboostboostingcyclical1': "DP-TR Newton IH EBM",
               "hist_basedexact_quantilesxgboostboostingstandardd": "XGBoost"
               }
    df = df.replace({"args": arg_map})
    # Round batch percentages to the nearest 0.05 and turn them into a
    # "(p=...)" suffix on the method label (empty for non-batch methods).
    # FIX: use .loc instead of chained indexing, which can silently write
    # to a copy (SettingWithCopyWarning).
    a = 0.05
    df["batch_perc"] = round(df["batch_perc"] / a) * a
    df["batch_perc"] = df["batch_perc"].astype("str")
    df.loc[df["batch_perc"] == "0.3", "batch_perc"] = "0.25"  # It got rounded up...
    df.loc[df["batch_perc"] == "0.25", "batch_perc"] = "(p=0.25)"
    df.loc[~df["args"].str.contains("Batch"), "batch_perc"] = ""
    df.loc[df["batch_perc"] == "1.0", "batch_perc"] = "(p=1)"
    df["args"] += " " + df["batch_perc"].astype("str")
    df["num_trees"] = df["num_trees"].astype('int')
    # FIX: original printed the unbound `.unique` method (missing parentheses).
    print(f"Unique args {df['args'].unique()}")
    # Methods to keep (superseded duplicate lists and unused styles/markers
    # dicts were removed from the original).
    filter_list = ["DP-TR Newton",
                   "DP-TR Newton IH", "XGBoost"]
    df["args"] = df["args"].str.strip(" ")
    df = df[df["args"].isin(filter_list)]
    df = df[df["num_trees"] == 100]
    df = df.sort_values(["args", "hist_bin"])
    # One line per (method, histogram-bin-count Q) combination.
    df["args"] = df["args"] + " Q=" + df["hist_bin"].astype("str")
    print(set(df["args"].values))
    print(len(set(df["args"].values)))
    print(df.columns)
    print(df["max_depth"].unique())
    # One colour family per method, shade per Q (9 Q values each).
    reds = sns.color_palette("Reds", 9)
    blues = sns.color_palette("Blues", 9)
    greens = sns.color_palette("Greens", 9)
    blacks = sns.color_palette("Greys", 9)
    cm = reds + blues + greens
    ax = sns.lineplot(data=df, x="n", y="test_auc", hue="args", palette=cm)
    plt.ylabel("Test AUC")
    # Inset zoom on the high-n region.
    axins = ax.inset_axes([0.3, 0.1, 0.45, 0.4])
    sns.lineplot(data=df, x="n", y="test_auc", hue="args", palette=cm, ax=axins)
    axins.set_xlim(60000, 70000)
    axins.set_ylim(0.88)
    axins.set_ylabel(None)
    axins.set_xlabel(None)
    axins.set_title('Zoomed')
    axins.grid(False)
    axins.get_legend().remove()
    # Hand-built double legend: one for Q (greys) and one for the method colours.
    marker_titles = ["2", "4", "8", "16", "32", "64", "128", "256", "512"]
    h = [plt.plot([], [], color=blacks[i])[0] for i in range(0, 9)]
    leg = plt.legend(handles=h, labels=marker_titles, title="Q", loc=(0.83, 0.03))
    leg.get_frame().set_alpha(None)
    ax.add_artist(leg)
    marker_titles = filter_list
    colors = [reds[-1], blues[-1], greens[-1]]
    h = [plt.plot([], [], color=colors[i])[0] for i in range(0, 3)]
    plt.legend(handles=h, labels=marker_titles, loc=(-0.05, 1.05), title="Method", ncol=3)
    plt.xlabel("Number of Clients ($n$)")
    path = out_path + "vary_clients.pdf"
    print(df["n"].unique())
    with pd.option_context('display.max_rows', None, 'display.max_columns', None):
        group_df = df[df["hist_bin"] == 64].groupby(["args", "n"]).mean()["test_auc"]
        print(group_df)
    print(f"Saving to {path}")
    plt.savefig(path, bbox_inches="tight")
    plt.clf()
def computation_benchmark(out_path="./paper_plots/"):
    """Plot total client-side and server-side computation time per method.

    Loads the benchmark CSV, collapses the per-run configuration columns into
    a single human-readable method name, filters to the methods of interest
    and writes two horizontal bar charts (client time and server time) as
    PDFs under ``out_path``.
    """
    set_fontsize(11)
    df = pd.read_csv("./paper_results/computation_benchmark.csv")
    df["args"] = df["args"].astype("str")
    print(df)
    print(df.columns)
    df = process_df_for_plotting(df)
    print("df processed...")
    # Concatenate the configuration columns into one key per run.
    # BUG FIX: a stray unary '+' after the first line continuation
    # ("... + \" followed by "+ df[...]") corrupted this expression.
    df["args"] = df["dp_method"] + df["split_method"] + df["sketch_type"] + \
        df["weight_update"] + df["training_method"] + \
        df["feature_interaction_method"] + df["feature_interaction_k"]
    df["args"] = df["args"].str.replace(" ", "")
    df["args"] = df["args"].str.replace("'", "")
    print(df["n"].unique())
    # Map the concatenated config keys onto the method names used in the paper.
    arg_map = {'DPhist_basedfeverlessxgboostboostingstandardd' : "FEVERLESS (sketch)",
               'DPtotally_randomuniformbatched_boostingcyclical1' : "DP-TR Batch EBM",
               'DPtotally_randomadaptive_hessianbatched_boostingstandardd' : "DP-TR Batch IH",
               'DPtotally_randomuniformgbmboostingcyclical1' : "DP-EBM",
               'DPtotally_randomuniformxgboostboostingrandom2' : "DP-TR Newton (Random k=2)",
               'DPtotally_randomuniformxgboostboostingstandardd' : "DP-TR Newton",
               'gaussian_ldphist_baseduniformxgboostboostingstandardd' : "LDP",
               'gaussian_ldptotally_randomuniformxgboostboostingstandardd' : "LDP-TR",
               'DPtotally_randomadaptive_hessianxgboostboostingstandardd' : "DP-TR Newton IH",
               'DPtotally_randomuniformrfrfstandardd' : "DP-RF",
               'DPtotally_randomuniformbatched_boostingstandardd' : "DP-TR Batch",
               'DPtotally_randomuniformxgboostboostingrandom5' : "DP-TR Newton (Random k=5)",
               'DPhist_baseduniformgbmboostingstandardd' : "DP-GBM",
               'DPtotally_randomuniformxgboostboostingcyclical2' : "DP-TR Newton (Cyclical k=2)",
               'DPtotally_randomuniformxgboostboostingcyclical1' : "DP-EBM Newton",
               'DPhist_baseduniformxgboostboostingstandardd' : "FEVERLESS (uniform)",
               'DPtotally_randomadaptive_hessianbatched_boostingcyclical1': "DP-TR Batch Newton IH EBM",
               'DPtotally_randomadaptive_hessianxgboostboostingcyclical1': "DP-TR Newton IH EBM",
               "hist_basedexact_quantilesxgboostboostingstandardd": "XGBoost"
               }
    df = df.replace({"args": arg_map})
    # Round batch_perc to the nearest 0.05 and turn it into a "(p=...)" suffix
    # for the Batch methods only. BUG FIX: the original used chained indexing
    # (df[col][cond] = ...) which may write to a copy and silently drop the
    # update; .loc assigns in place.
    a = 0.05
    df["batch_perc"] = round(df["batch_perc"] / a) * a
    df["batch_perc"] = df["batch_perc"].astype("str")
    df.loc[df["batch_perc"] == "0.3", "batch_perc"] = "0.25"  # It got rounded up...
    df.loc[df["batch_perc"] == "0.25", "batch_perc"] = "(p=0.25)"
    df.loc[~df["args"].str.contains("Batch"), "batch_perc"] = ""
    df.loc[df["batch_perc"] == "1.0", "batch_perc"] = "(p=1)"
    df["args"] += " " + df["batch_perc"].astype("str")
    df["num_trees"] = df["num_trees"].astype('int')
    print(f"Unique args {df['args'].unique()}")  # BUG FIX: .unique was never called
    filter_list = ["FEVERLESS (uniform)",
                   "DP-GBM",
                   "DP-RF",
                   "DP-EBM",
                   "DP-EBM Newton",
                   "DP-TR Newton",
                   "DP-TR Newton IH",
                   "DP-TR Batch IH (p=0.25)",
                   "DP-TR Batch IH (p=1)",
                   "DP-TR Batch Newton IH EBM (p=1)",
                   "DP-TR Batch Newton IH EBM (p=0.25)",
                   "DP-TR Newton IH EBM"]
    # (The unused ``styles``/``markers`` dicts, the commented-out alternative
    # filter lists and the unused ``filter_col`` were removed: the bar plots
    # below never consumed them.)
    # Filter to the selected methods and tree counts, then sort for plotting.
    df["args"] = df["args"].str.strip(" ")
    df = df[df["args"].isin(filter_list)]
    df = df[df["num_trees"].isin([75, 100, 125])]
    df = df.sort_values(["args", "hist_bin"])
    print(set(df["args"].values))
    print(len(set(df["args"].values)))
    print(df["max_depth"].unique())
    print(df.columns)
    cm = sns.color_palette("deep", len(filter_list))
    cm[5] = "#933136"
    cm[7] = "#2C760A"
    # Total client-side time = sum of the client checkpoint columns.
    df["total_client_time"] = df["t_client_histogram_building"] + df["t_client_computing_gradients"] + df["t_client_initialise_private_histogram"] + df["t_client_forming_grad_histogram"] + df["t_client_retrieving_grads_for_node"]
    sns.barplot(data=df, y="args", x="total_client_time", palette=cm)
    plt.xlabel("Total Time (seconds)")
    plt.ylabel("Methods")
    plt.tight_layout()
    path = out_path + "/total_client_computation.pdf"
    print(f"Saving to {path}")
    plt.savefig(path, bbox_inches="tight")
    plt.clf()
    # Total server-side time = sum of the server checkpoint columns.
    # NOTE(review): "t_server_post_tree ops" (with a space) appears to be the
    # real column name in the CSV -- kept verbatim.
    df["total_server_time"] = df["t_server_initial_split_candidates"] + df["t_server_privacy_accountant_initialisation"] + df["t_server_init_model_weights"] + df["t_server_split_candidates"] + df["t_server_pre_tree_ops"] + df["t_server_post_tree ops"] + df["t_server_initialise_priv_hist"] + df["t_server_adding_noise_to_hist"] + df["t_server_sampling_features"] + df["t_server_calculating_internal_split"] + df["t_server_split_constraints"] + df["t_server_leaf_weight"]
    sns.barplot(data=df, y="args", x="total_server_time", palette=cm)
    plt.xlabel("Total Time (seconds)")
    plt.ylabel("Methods")
    plt.tight_layout()
    path = out_path + "/total_server_computation.pdf"
    print(f"Saving to {path}")
    plt.savefig(path, bbox_inches="tight")
    plt.clf()
# ============================================= Appendix =============================================
# Default dataset list consumed by the appendix_E* helpers below.
# datasets = ["Credit 1", "Credit 2", "adult", "Bank", "nomao"]
datasets = ["Credit 2", "adult", "Bank", "nomao"]
# Appendix - Split methods
# Used to plot figures 7-10
def appendix_E1():
    """Appendix figures 7-10: split-method comparison plots per dataset."""
    for ds in datasets:
        plot_split_methods_with_update(base_path, dataset=ds, show_dp=False,
                                       legends=[False, False, True])
# Appendix - Split methods + weight updates table
# Used to plot tables 7-10
def appendix_E1_table():
    """Appendix tables 7-10: split-method + weight-update tables at several epsilons."""
    for eps in (0.1, 0.25, 0.75, 1):
        print("EPS", eps)
        table_split_methods_with_update(eps)
# Used to plot Figure 12
def appendix_E2(dataset="Credit 1", depth="4", epsilon=1):
    """Appendix Figure 12: split-candidate plots with per-dataset y-limits.

    NOTE(review): the parameters are effectively unused -- the local
    ``datasets`` list shadows ``dataset``; kept for interface compatibility.
    """
    datasets = ["Credit 2", "adult", "nomao", "Bank", "higgs-sample"]
    # Per-dataset y-axis lower limit; anything not listed uses 0.7.
    ylim_overrides = {"higgs-sample": 0.5, "nomao": 0.87, "adult": 0.81}
    for ds in datasets:
        plot_split_candidates(base_path, dataset=ds,
                              ylim=ylim_overrides.get(ds, 0.7))
# Not used
def appendix_E3():
    """Not used: k-way, non-DP EBM and EBM-comparison plots for each dataset."""
    for ds in datasets:
        plot_k_way(dataset=ds)
        plot_non_dp_ebm(dataset=ds)
        plot_ebm_comparisons(dataset=ds)
plot_ebm_comparisons(dataset=dataset)
# Used to plot Figure 13
def appendix_E4():
    """Appendix Figure 13: low-epsilon budget-breakdown plots per dataset."""
    for ds in datasets:
        plot_low_eps_bb(dataset=ds)
# Appendix - Comparisons
# Used to plot figures 14-18
def appendix_E5():
    """Appendix figures 14-18: method-comparison plots with per-dataset y-limits."""
    datasets = ["Credit 2", "adult", "nomao", "Bank", "higgs-sample"]
    # Per-dataset ylim2; anything not listed uses 0.82.
    ylim_overrides = {"higgs-sample": 0.65, "Credit 2": 0.7, "adult": 0.85}
    for ds in datasets:
        plot_comparisons(dataset=ds, ylim2=ylim_overrides.get(ds, 0.82))
# exp_plot() # test
# ============================================= Plotting Funcs =============================================
# Script entry point: only the uncommented call(s) below run. Uncomment the
# relevant line to regenerate a specific figure or table from the paper.
base_path = "./paper_plots/"
set_fontsize()
# ---------- REVISIONS ----------
# comparison_bubble_plot()
# rank_table()
# synthetic_comm()
# vary_clients()
# computation_benchmark()
comparison_bubble_plot()
# ---------- MAIN PAPER TABLES ----------
# table_split_methods_with_update() # Table 2
# table_split_candidate() # Table 3
# table_low_eps_bb() # Table 4
# table_comparisons() # Not used
# ---------- MAIN PAPER PLOTS ----------
# plot_split_methods_with_update(y_lims=[None, 0.67, None], show_dp=False) # Figure 1
# plot_split_candidates() # Figure 2
# plot_k_way(filepath=base_path) # Figure 3
# plot_ebm_comparisons() # Figure 4
# plot_low_eps_bb() # Figure 5
# plot_comparisons(reduced=False) # Figure 6
# plot_non_dp_ebm() # Not used
# ---------- APPENDIX ----------
# appendix_E1()
# appendix_E1_table()
# appendix_E2()
# appendix_E3()
# appendix_E4()
# appendix_E5()
# ================== Not used.... ==================
# plot_grad_budgets()
# boosting_rf_plot()
# ebm_top_bottom_plot()
drizzlepac | drizzlepac-master/doc/source/conf.py | # -*- coding: utf-8 -*-
#
# STSCI documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 22 17:25:41 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# Check Sphinx version
import os
import sys
import sphinx
import tomli
from pathlib import Path
from packaging.version import Version
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
conf = ConfigParser()
def setup(app):
    """Sphinx extension hook: register the custom STScI stylesheet."""
    app.add_css_file('stsci.css')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('../'))
# sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../../.eggs'))
sys.path.insert(0, os.path.abspath('../../src/'))
# sys.path.insert(0, os.path.abspath('../'))
# sys.path.insert(0, os.path.abspath('packagename/'))
# sys.path.insert(0, os.path.abspath('exts/'))
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3'
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# -- General configuration ------------------------------------------------
# conf.read([os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')])
with open(Path(__file__).parent.parent.parent / "pyproject.toml", "rb") as configuration_file:
conf = tomli.load(configuration_file)
setup_cfg = conf['project']
def check_sphinx_version(expected_version):
    """Raise RuntimeError if the installed Sphinx is older than *expected_version*."""
    installed = Version(sphinx.__version__)
    required = Version(expected_version)
    if installed < required:
        raise RuntimeError(
            "At least Sphinx version {0} is required to build this "
            "documentation. Found {1}.".format(required, installed))
# Configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('http://docs.python.org/3/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'matplotlib': ('http://matplotlib.org/', None),
'astropy': ('http://docs.astropy.org/en/stable/', None),
'tweakwcs': ('https://tweakwcs.readthedocs.io/en/latest/', None),
'stsci.skypac': ('https://stsci-skypac.readthedocs.io/en/latest/', None),
'stwcs': ('https://stwcs.readthedocs.io/en/latest/', None),
}
if sys.version_info[0] == 2:
intersphinx_mapping['python'] = ('http://docs.python.org/2/', None)
# intersphinx_mapping['pythonloc'] = (
# 'http://docs.python.org/',
# os.path.abspath(os.path.join(os.path.dirname(__file__),
# 'local/python2_local_links.inv')))
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'numpydoc',
'sphinx_automodapi.automodapi',
'sphinx_automodapi.automodsumm',
'sphinx_automodapi.autodoc_enhancements',
'sphinx_automodapi.smart_resolver',
]
if on_rtd:
extensions.append('sphinx.ext.mathjax')
elif Version(sphinx.__version__) < Version('1.4'):
extensions.append('sphinx.ext.pngmath')
else:
extensions.append('sphinx.ext.imgmath')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'DrizzlePac'
copyright = u'2021, Warren Hack, Nadia Dencheva, Chris Sontag, Megan Sosey, Michael Droettboom, Mihai Cara, Michael Dulude, Michele De La Pena'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
from drizzlepac import __version__ as version
# The full version, including alpha/beta/rc tags.
# release = '1.0.6 (14-Aug-2012)'
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'obj'
# Don't show summaries of the members in each class along with the
# class' docstring
numpydoc_show_class_members = False
autosummary_generate = True
automodapi_toctreedirnm = 'api'
# Class documentation should contain *both* the class docstring and
# the __init__ docstring
autoclass_content = "both"
# Render inheritance diagrams in SVG
graphviz_output_format = "svg"
graphviz_dot_args = [
'-Nfontsize=10',
'-Nfontname=Helvetica Neue, Helvetica, Arial, sans-serif',
'-Efontsize=10',
'-Efontname=Helvetica Neue, Helvetica, Arial, sans-serif',
'-Gfontsize=10',
'-Gfontname=Helvetica Neue, Helvetica, Arial, sans-serif'
]
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = [stsci_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'drizzlepacdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '11pt',
# Additional stuff for the LaTeX preamble.
'preamble': r"""\usepackage{enumitem} \setlistdepth{99}"""
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'drizzlepac.tex', u'DrizzlePac Documentation',
u'Warren Hack, \\and Nadia Dencheva, \\and Chris Sontag, '
u'\\and Megan Sosey, \\and Michael Droettboom, \\and Mihai Cara', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
# BUG FIX: this previously rebound ``latex_elements`` to a one-key dict,
# discarding the 'papersize' and enumitem 'preamble' settings defined above.
# Update the existing mapping instead of clobbering it.
latex_elements['pointsize'] = '11pt'
# Enable nitpicky mode - which ensures that all references in the docs resolve.
nitpicky = True
| 10,719 | 32.08642 | 143 | py |
GNNs-for-NLP | GNNs-for-NLP-master/pytorch_gcn.py | from utils import *
import os.path as osp
import torch
import torch.nn.functional as F
from torch_geometric.datasets import Planetoid
import torch_geometric.transforms as T
from torch_geometric.nn import GCNConv
class KipfGCN(torch.nn.Module):
    """Two-layer GCN (Kipf & Welling) producing log-softmax class scores."""

    def __init__(self, data, num_class, params):
        super(KipfGCN, self).__init__()
        self.p = params
        self.data = data
        # cached=True: the normalized adjacency is computed once and reused.
        self.conv1 = GCNConv(self.data.num_features, self.p.gcn_dim, cached=True)
        self.conv2 = GCNConv(self.p.gcn_dim, num_class, cached=True)

    def forward(self, x, edge_index):
        hidden = self.conv1(x, edge_index)
        hidden = F.dropout(F.relu(hidden), p=self.p.dropout, training=self.training)
        return F.log_softmax(self.conv2(hidden, edge_index), dim=1)
class Main(object):
    """End-to-end trainer for the Kipf GCN: data loading, model/optimizer
    construction, full-batch epoch loop and best-model tracking."""

    def load_data(self):
        """Load the Planetoid dataset named by ``self.p.data`` with normalized
        features; keep its single graph and the number of classes."""
        print("loading data")
        path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', self.p.data)
        dataset = Planetoid(path, self.p.data, T.NormalizeFeatures())
        self.num_class = dataset.num_classes
        self.data = dataset[0]

    def add_model(self):
        """Build the GCN and move it to the selected device."""
        model = KipfGCN(self.data, self.num_class, self.p)
        model.to(self.device)
        return model

    def add_optimizer(self, parameters):
        """Return Adam when ``self.p.opt == 'adam'``, otherwise SGD, both with
        the configured learning rate and L2 weight decay."""
        if self.p.opt == 'adam':
            return torch.optim.Adam(parameters, lr=self.p.lr, weight_decay=self.p.l2)
        return torch.optim.SGD(parameters, lr=self.p.lr, weight_decay=self.p.l2)

    def __init__(self, params):
        """Store hyperparameters, set up directories/logging/device, then load
        the data and build the model and optimizer.

        Parameters
        ----------
        params: parsed hyperparameters of the run
        """
        self.p = params
        self.p.save_dir = '{}/{}'.format(self.p.model_dir, self.p.name)
        # BUG FIX: the save-dir branch previously re-created ``model_dir``
        # instead of ``save_dir``; also use os.makedirs instead of shelling
        # out to ``mkdir -p``.
        os.makedirs(self.p.log_dir, exist_ok=True)
        os.makedirs(self.p.save_dir, exist_ok=True)
        # Get Logger
        self.logger = get_logger(self.p.name, self.p.log_dir, self.p.config_dir)
        self.logger.info(vars(self.p)); pprint(vars(self.p))
        if self.p.gpu != '-1' and torch.cuda.is_available():
            self.device = torch.device('cuda')
            torch.cuda.set_rng_state(torch.cuda.get_rng_state())
            torch.backends.cudnn.deterministic = True
        else:
            self.device = torch.device('cpu')
        self.load_data()
        self.data.to(self.device)
        self.model = self.add_model()
        self.optimizer = self.add_optimizer(self.model.parameters())

    def get_acc(self, logits, y_actual, mask):
        """Classification accuracy over the masked nodes.

        Parameters
        ----------
        logits: model outputs already restricted to the masked nodes
        y_actual: ground-truth labels for all nodes
        mask: boolean tensor selecting the nodes to score
        """
        y_pred = torch.max(logits, dim=1)[1]
        return y_pred.eq(y_actual[mask]).sum().item() / mask.sum().item()

    def evaluate(self, sess=None, split='valid'):
        """Evaluate the model on the validation or test split.

        BUG FIX: the previous body was a TensorFlow leftover calling
        ``self.create_feed_dict``/``sess.run`` with ``self.loss``/
        ``self.accuracy`` -- none of which exist in this PyTorch port -- and
        would raise AttributeError. ``sess`` is kept (and ignored) for
        backward compatibility with existing callers.

        Returns
        -------
        (loss, accuracy) on the requested split
        """
        mask = self.data.val_mask if split == 'valid' else self.data.test_mask
        self.model.eval()
        with torch.no_grad():
            logits = self.model(self.data.x, self.data.edge_index)
            loss = F.nll_loss(logits[mask], self.data.y[mask]).item()
            acc = self.get_acc(logits[mask], self.data.y, mask)
        return loss, acc

    def run_epoch(self, epoch, shuffle=True):
        """Run one full-batch training step, then evaluate on train/val and
        track the best validation (and corresponding test) accuracy.

        Returns the training loss (the original returned None even though
        ``fit`` assigned its result).
        """
        t = time.time()
        self.model.train()  # (was called twice; once is enough)
        self.optimizer.zero_grad()
        logits = self.model(self.data.x, self.data.edge_index)[self.data.train_mask]
        train_loss = F.nll_loss(logits, self.data.y[self.data.train_mask])
        train_loss.backward()
        self.optimizer.step()
        self.model.eval()
        logits = self.model(self.data.x, self.data.edge_index)
        train_acc = self.get_acc(logits[self.data.train_mask], self.data.y, self.data.train_mask)
        val_acc = self.get_acc(logits[self.data.val_mask], self.data.y, self.data.val_mask)
        if val_acc > self.best_val:
            self.best_val = val_acc
            self.best_test = self.get_acc(logits[self.data.test_mask], self.data.y, self.data.test_mask)
        print("Epoch:", '%04d' % (epoch + 1),
              "train_loss=", "{:.5f}".format(train_loss),
              "train_acc=", "{:.5f}".format(train_acc),
              "val_acc=", "{:.5f}".format(val_acc),
              "time=", "{:.5f}".format(time.time() - t))
        return train_loss

    def fit(self):
        """Train for ``max_epochs`` epochs and report the best val/test accuracy."""
        self.save_path = os.path.join(self.p.save_dir, 'best_int_avg')
        self.best_val, self.best_test = 0.0, 0.0
        if self.p.restore:
            # NOTE(review): ``self.saver`` is never created anywhere in this
            # file (TensorFlow leftover); restoring would fail -- TODO confirm
            # intended checkpointing mechanism.
            self.saver.restore(self.save_path)
        for epoch in range(self.p.max_epochs):
            train_loss = self.run_epoch(epoch)
        print('Best Valid: {}, Best Test: {}'.format(self.best_val, self.best_test))
if __name__== "__main__":
    parser = argparse.ArgumentParser(description='GNN for NLP tutorial - Kipf GCN')
    parser.add_argument('--data', dest="data", default='cora', help='Dataset to use')
    parser.add_argument('--gpu', dest="gpu", default='0', help='GPU to use')
    parser.add_argument('--name', dest="name", default='test', help='Name of the run')
    parser.add_argument('--lr', dest="lr", default=0.01, type=float, help='Learning rate')
    parser.add_argument('--epoch', dest="max_epochs", default=200, type=int, help='Max epochs')
    parser.add_argument('--l2', dest="l2", default=5e-4, type=float, help='L2 regularization')
    parser.add_argument('--seed', dest="seed", default=1234, type=int, help='Seed for randomization')
    parser.add_argument('--opt', dest="opt", default='adam', help='Optimizer to use for training')
    # GCN-related params
    parser.add_argument('--gcn_dim', dest="gcn_dim", default=16, type=int, help='GCN hidden dimension')
    parser.add_argument('--drop', dest="dropout", default=0.5, type=float, help='Dropout for full connected layer')
    parser.add_argument('--restore', dest="restore", action='store_true', help='Restore from the previous best saved model')
    parser.add_argument('--log_dir', dest="log_dir", default='./log/', help='Log directory')
    # BUG FIX: '--model_dir' previously wrote to dest 'config_dir' and
    # '--config_dir' to dest 'model_dir', so the flags silently configured the
    # wrong directories (the defaults were attached to the right dests, so
    # default behavior is unchanged).
    parser.add_argument('--config_dir', dest="config_dir", default='./config/', help='Config directory')
    parser.add_argument('--model_dir', dest="model_dir", default='./models/', help='Model directory')
    args = parser.parse_args()
    if not args.restore: args.name = args.name + '_' + time.strftime("%d_%m_%Y") + '_' + time.strftime("%H:%M:%S")
    # Set seed (the numpy seed was previously set twice)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    # Create Model
    model = Main(args)
    model.fit()
    print('Model Trained Successfully!!')
| 7,611 | 29.448 | 139 | py |
cryptorandom | cryptorandom-main/doc/conf.py | #
# cryptorandom documentation build configuration file, created by
# sphinx-quickstart on Fri Oct 21 12:13:15 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from datetime import date
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.viewcode',
'sphinx.ext.imgmath',
'numpydoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'cryptorandom'
copyright = f'2018-{date.today().year}, Kellie Ottoboni and Philip B. Stark'
author = 'Kellie Ottoboni and Philip B. Stark'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import cryptorandom
version = cryptorandom.__version__
release = cryptorandom.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'cryptorandomdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'cryptorandom.tex', 'cryptorandom Documentation',
'Kellie Ottoboni and Philip B. Stark', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cryptorandom', 'cryptorandom Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'cryptorandom', 'cryptorandom Documentation',
author, 'cryptorandom', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 9,156 | 31.017483 | 79 | py |
DeeperForensicsChallengeSolution | DeeperForensicsChallengeSolution-master/dataset/dataset.py | import numpy as np
import os
import time
import sys
from tqdm import tqdm
import cv2
import torch
from torch.utils.data import Dataset, DataLoader
from albumentations.pytorch import ToTensor, ToTensorV2
from albumentations import (
Compose, HorizontalFlip, CLAHE, HueSaturationValue, Normalize, RandomBrightnessContrast,
RandomBrightness, RandomContrast, RandomGamma, OneOf, Resize, ImageCompression, Rotate,
ToFloat, ShiftScaleRotate, GridDistortion, ElasticTransform, JpegCompression, Cutout, GridDropout,
RGBShift, RandomBrightness, RandomContrast, Blur, MotionBlur, MedianBlur, GaussNoise, CenterCrop,
IAAAdditiveGaussianNoise, OpticalDistortion, RandomSizedCrop, VerticalFlip, GaussianBlur, CoarseDropout,
PadIfNeeded, ToGray, FancyPCA)
from catalyst.data.sampler import BalanceClassSampler
try:
from dataset.distortions import *
except:
sys.path.append('/data1/cby/py_project/DeeperForensicsChallengeSubmissionExample/dataset')
from distortions import *
def get_train_transforms(size=300):
    """Build the albumentations training pipeline for ``size`` x ``size`` inputs."""
    # Photometric / compression perturbations first, then geometry, then
    # ImageNet normalisation and conversion to a CHW tensor.
    steps = [
        ImageCompression(quality_lower=60, quality_upper=100, p=0.5),
        GaussNoise(p=0.1),
        GaussianBlur(blur_limit=3, p=0.05),
        HorizontalFlip(),
        Resize(height=size, width=size),
        PadIfNeeded(min_height=size, min_width=size, border_mode=cv2.BORDER_CONSTANT),
        OneOf([RandomBrightnessContrast(), HueSaturationValue()], p=0.5),
        OneOf([CoarseDropout(), GridDropout()], p=0.2),
        ToGray(p=0.2),
        ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=10,
                         border_mode=cv2.BORDER_CONSTANT, p=0.5),
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensorV2(),
    ]
    return Compose(steps)
def get_valid_transforms(size=300):
    """Build the deterministic eval pipeline: resize/pad, normalise, to tensor."""
    steps = [
        Resize(height=size, width=size, p=1.0),
        PadIfNeeded(min_height=size, min_width=size, border_mode=cv2.BORDER_CONSTANT),
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensorV2(p=1.0),
    ]
    return Compose(steps, p=1.0)
def one_hot(size, target):
    """Return a float32 one-hot vector of length ``size`` with ``target`` set to 1."""
    encoding = torch.zeros(size, dtype=torch.float32)
    encoding[target] = 1.0
    return encoding
def get_file(path, format='png'):
    """Recursively collect files under ``path`` whose name contains ``format``.

    Args:
        path: directory to walk.
        format: case-insensitive substring matched against the file name
            (e.g. ``'png'``). Note this is a substring test, not a strict
            extension check, preserving the original behaviour.

    Returns:
        list[str]: full paths of every matching file, in directory-walk order.
    """
    FileList = []
    for name in os.listdir(path):
        full_path = os.path.join(path, name)
        if os.path.isdir(full_path):
            # Recurse into sub-directories.
            FileList.extend(get_file(full_path, format))
        elif os.path.isfile(full_path):
            if format.lower() in name.lower():
                FileList.append(full_path)
        else:
            # Neither a regular file nor a directory (e.g. a broken symlink).
            # Bug fix: the original called print("未知文件:%s", name), which
            # never formats the placeholder and prints the literal '%s'.
            print("未知文件:%s" % name)
    return FileList
def load_image_file_paths(real_root_path='/raid/chenby/DeepForensics/face_images/source_images',
                          real_target_root_path='/raid/chenby/DeepForensics/face_images/target_images/',
                          fake_root_path='/raid/chenby/DeepForensics/face_images/manipulated_images',
                          split_root_path='/raid/chenby/DeepForensics/face_images/lists/splits',
                          data_type='train'):
    """Assemble real/fake face-image path arrays for one DeeperForensics split.

    Reads '<split_root_path>/<data_type>.txt' (one manipulated-video name per
    line), expands it into fake video dirs, source (real) dirs and FF++ c23/c40
    target videos, then gathers the frame images under each.

    NOTE(review): the result lines (np.save / return) are all commented out,
    so this function currently returns None; re-enable one before use.
    NOTE(review): the split file handle `f` is never closed.
    """
    txt_path = os.path.join(split_root_path, data_type + '.txt')
    f = open(txt_path, "r")
    lines = f.readlines()
    print(len(lines))
    real_video_paths = []
    real_target_video_paths = []
    fake_video_paths = []
    for line in lines:
        line = line.strip().replace("\n", "")
        # Each manipulation subset (method) contains the same video names.
        fake_subsets = os.listdir(fake_root_path)
        for fake_subset in fake_subsets:
            fake = os.path.join(fake_root_path, fake_subset, line)
            fake_video_paths.append(fake)
        # Name pattern looks like '<target>_<source>.mp4'; the first field is
        # the FF++ target video id, the second the real source actor id.
        real_id_target = line.split('.')[0].split('_')[0] + '.mp4'
        real_target_video_paths.append(os.path.join(real_target_root_path, 'c23', real_id_target))
        real_target_video_paths.append(os.path.join(real_target_root_path, 'c40', real_id_target))
        real_id = line.split('.')[0].split('_')[1]
        real = os.path.join(real_root_path, real_id)
        real_video_paths.append(real)
    # print('fake:', len(fake_video_paths), 'real:', len(real_video_paths), real_video_paths[0])
    # De-duplicate: many split lines map to the same source/target videos.
    real_video_paths = set(real_video_paths)
    real_target_video_paths = set(real_target_video_paths)
    fake_video_paths = set(fake_video_paths)
    fake_image_paths = []
    for fake_video_path in tqdm(fake_video_paths):
        images = os.listdir(fake_video_path)
        images = [os.path.join(fake_video_path, image) for image in images]
        fake_image_paths += images
    real_image_paths = []
    for real_video_path in tqdm(real_video_paths):
        images = get_file(real_video_path, format='png')
        real_image_paths += images
    print(len(real_image_paths))
    # FF++ target frames are subsampled (every 4th frame) to limit volume.
    for target_path in tqdm(real_target_video_paths):
        images = sample_real_target_images(path=target_path, num_samples=4)
        real_image_paths += images
    print('Images fake:', len(fake_image_paths), 'real:', len(real_image_paths))
    real_image_paths = np.array(real_image_paths)
    fake_image_paths = np.array(fake_image_paths)
    # np.save('split_npy/real_' + data_type + '.npy', real_image_paths)
    # np.save('split_npy/fake_' + data_type + '.npy', fake_image_paths)
    # return real_image_paths, fake_image_paths
# Subsample real FF++ c23/c40 target frames: keep every num_samples-th frame.
def sample_real_target_images(path, num_samples=4):
    """Return paths of frames in ``path`` whose numeric index is a multiple of ``num_samples``."""
    return [
        os.path.join(path, fname)
        for fname in sorted(os.listdir(path))
        if int(fname.split('.')[0]) % num_samples == 0
    ]
def load_images_from_npy(data_type='train', npy_root='/data1/cby/py_project/DeeperForensicsChallengeSubmissionExample/dataset/split_npy/'):
    """Load the cached real/fake image-path arrays for one dataset split."""
    real_image_paths = np.load(f'{npy_root}real_{data_type}.npy')
    fake_image_paths = np.load(f'{npy_root}fake_{data_type}.npy')
    return real_image_paths, fake_image_paths
def load_images_from_npys(real_npys=None, fake_npys=None):
    """Concatenate image-path arrays stored across several .npy files.

    Args:
        real_npys: iterable of .npy file paths holding real-image paths.
        fake_npys: iterable of .npy file paths holding fake-image paths.

    Returns:
        (np.ndarray, np.ndarray): concatenated real and fake path arrays.

    Bug fix: the original never initialised the accumulator when given an
    empty list, raising UnboundLocalError; empty (or omitted) inputs now
    yield empty arrays. Mutable default arguments were also removed.
    """
    def _load_concat(npy_paths):
        # One np.load per file, joined along the first axis.
        if not npy_paths:
            return np.array([], dtype=str)
        return np.concatenate([np.load(p) for p in npy_paths], axis=0)

    real_image_paths = _load_concat(real_npys)
    fake_image_paths = _load_concat(fake_npys)
    return real_image_paths, fake_image_paths
class DeeperForensicsDataset(Dataset):
    """Binary real(0)/fake(1) face-image dataset backed by cached .npy path lists."""

    def __init__(self, data_type='train', is_one_hot=True, transforms=None, classes_num=2):
        super().__init__()
        self.classes_num = classes_num
        self.data_type = data_type
        self.transforms = transforms
        self.is_one_hot = is_one_hot
        real_paths, fake_paths = load_images_from_npy(data_type=data_type)
        print('real:', real_paths.shape, 'fake:', fake_paths.shape)
        # Real samples are labelled 0, fake samples 1.
        self.images = list(real_paths) + list(fake_paths)
        self.labels = [0] * len(real_paths) + [1] * len(fake_paths)

    def __getitem__(self, index: int):
        target = self.labels[index]
        # BGR -> RGB float32, then the albumentations pipeline if provided.
        img = cv2.imread(self.images[index], cv2.IMREAD_COLOR)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB).astype(np.float32)
        if self.transforms:
            img = self.transforms(**{'image': img})['image']
        if self.is_one_hot:
            target = one_hot(self.classes_num, target)
        return img, target

    def __len__(self) -> int:
        return len(self.images)

    def get_labels(self):
        # Copy so callers (e.g. BalanceClassSampler) cannot mutate our state.
        return list(self.labels)
class DeeperForensicsDatasetNew(Dataset):
    """Real(0)/fake(1) dataset fed by explicit lists of cached .npy path files.

    Without an albumentations ``transforms``, a manual pipeline is used:
    distortion augmentation (train split only), resize to 224, RGB, [0, 1]
    scaling and HWC -> CHW transpose.
    """

    def __init__(self, real_npys, fake_npys, is_one_hot=False, transforms=None, classes_num=2, data_type='train'):
        super().__init__()
        self.classes_num = classes_num
        self.transforms = transforms
        self.is_one_hot = is_one_hot
        self.data_type = data_type
        real_paths, fake_paths = load_images_from_npys(real_npys, fake_npys)
        print('real:', real_paths.shape, 'fake:', fake_paths.shape)
        # Real samples are labelled 0, fake samples 1.
        self.images = list(real_paths) + list(fake_paths)
        self.labels = [0] * len(real_paths) + [1] * len(fake_paths)

    def __getitem__(self, index: int):
        target = self.labels[index]
        img = cv2.imread(self.images[index], cv2.IMREAD_COLOR)
        if self.transforms:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB).astype(np.float32)
            img = self.transforms(**{'image': img})['image']
        else:
            # Manual pipeline; distortion augmentation only during training.
            if self.data_type == 'train':
                img = my_augmentation(img)
            img = cv2.resize(img, dsize=(224, 224))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB).astype(np.float32)
            img /= 255
            img = np.transpose(img, (2, 0, 1))
        if self.is_one_hot:
            target = one_hot(self.classes_num, target)
        return img, target

    def __len__(self) -> int:
        return len(self.images)

    def get_labels(self):
        # Copy so callers (e.g. BalanceClassSampler) cannot mutate our state.
        return list(self.labels)
# Custom distortion-based augmentation (helpers come from dataset/distortions.py).
def my_augmentation(img):
    """Apply one random distortion (p=0.8) or an independent mix of them (p=0.2).

    Distortion codes: colour saturation (CS), colour contrast (CC),
    block-wise (BW), Gaussian noise (GNC), Gaussian blur (GB), JPEG.
    """
    distortion_types = ['CS', 'CC', 'BW', 'GNC', 'GB', 'JPEG']
    if random.random() > 0.2:
        # Single distortion at a random severity level in [1, 5].
        chosen = distortion_types[random.randint(0, 5)]
        level = random.randint(1, 5)
        param = get_distortion_parameter(chosen, level)
        img = get_distortion_function(chosen)(img, param)
    else:
        # Mixed augmentation: each distortion applied independently with p=0.5.
        for chosen in distortion_types:
            if random.random() > 0.5:
                level = random.randint(1, 5)
                param = get_distortion_parameter(chosen, level)
                img = get_distortion_function(chosen)(img, param)
    return img
if __name__ == '__main__':
    # Smoke test: build the dataset from Celeb-DF cached path lists, then
    # iterate a few class-balanced batches and time the DataLoader.
    # load_image_file_paths(data_type='test')
    # real_image_paths, fake_image_paths = load_images_from_npy(data_type='val')
    # print(real_image_paths.shape, fake_image_paths.shape)
    # print(real_image_paths[0], fake_image_paths[0])
    # sample = sample_real_target_images(path='/raid/chenby/DeepForensics/face_images/target_images/c23/000.mp4', num_samples=4)
    # print('sample:', len(sample), sample[:5])
    real_npys = ['/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/Celeb-DF-v1_mtcnn/new/real_60frames_train.npy']
    fake_npys = ['/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/Celeb-DF-v1_mtcnn/new/fake_30frames_train.npy']
    start = time.time()
    # transforms=None exercises the manual my_augmentation pipeline.
    xdl = DeeperForensicsDatasetNew(real_npys=real_npys, fake_npys=fake_npys, transforms=None,  # get_train_transforms(size=224)
                                    is_one_hot=False, classes_num=2)
    # xdl = DeeperForensicsDataset(data_type='train', transforms=get_train_transforms(), is_one_hot=False)
    print('length:', len(xdl))
    train_loader = DataLoader(xdl, batch_size=16, shuffle=False, num_workers=4,
                              sampler=BalanceClassSampler(labels=xdl.get_labels(), mode="downsampling"))
    for i, (img, label) in enumerate(train_loader):
        print(i, img.shape, label.shape, label)
        if i == 10:
            break
    end = time.time()
    print('end iterate')
    print('DataLoader total time: %fs' % (end - start))
    pass
| 11,928 | 38.369637 | 139 | py |
DeeperForensicsChallengeSolution | DeeperForensicsChallengeSolution-master/train/train_add_data_my_aug.py | import sys
sys.path.append('..')
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import *
import time
from model.models import get_efficientnet
from dataset.dataset import DeeperForensicsDataset, get_train_transforms, get_valid_transforms, DeeperForensicsDatasetNew
from loss.losses import LabelSmoothing
from catalyst.data.sampler import BalanceClassSampler
from utils.utils import AverageMeter, calculate_metrics, Logger
# Cached face-crop path lists (.npy) merged into train/val splits.
# Sources: FF++ c23/c40, Celeb-DF v1, DFDC, UADFV, DFD and DeeperForensics.
train_real_paths_npy = ['/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/c23/real_30frames_FF_train.npy',
                        '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/c40/real_30frames_FF_train.npy',
                        '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/Celeb-DF-v1_mtcnn/new/real_60frames_train.npy',
                        '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/DFDC_original/new/real_120frames_train.npy',
                        '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/UADFV/new/real_30frames_train.npy',
                        '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/DFD/c23/real_240frames_train.npy',
                        '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/DFD/c40/real_240frames_train.npy',
                        '/data1/cby/py_project/DeeperForensicsChallengeSubmissionExample/dataset/split_npy/real_train.npy',
                        #'/data1/cby/py_project/FaceForensics/classification/dataset/train_test_npy/train_real_paths.npy',
                        ]
train_fake_paths_npy = ['/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/c23/fake_30frames_FF_train.npy',
                        '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/c40/fake_30frames_FF_train.npy',
                        '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/Celeb-DF-v1_mtcnn/new/fake_30frames_train.npy',
                        '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/DFDC_original/new/fake_30frames_train.npy',
                        '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/UADFV/new/fake_30frames_train.npy',
                        '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/DFD/c23/fake_30frames_train.npy',
                        '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/DFD/c40/fake_30frames_train.npy',
                        '/data1/cby/py_project/DeeperForensicsChallengeSubmissionExample/dataset/split_npy/fake_train.npy',
                        #'/data1/cby/py_project/FaceForensics/classification/dataset/train_test_npy/train_fake_paths.npy'
                        ]
# FF++ validation lists kept for reference but excluded from the val split:
# '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/c23/real_30frames_FF_val.npy',
# '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/c40/real_30frames_FF_val.npy',
val_real_paths_npy = ['/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/Celeb-DF-v1_mtcnn/new/real_30frames_test.npy',
                      '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/DFDC_original/new/real_30frames_test.npy',
                      '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/UADFV/new/real_30frames_test.npy',
                      '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/DFD/c23/real_30frames_test.npy',
                      '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/DFD/c40/real_30frames_test.npy',
                      '/data1/cby/py_project/DeeperForensicsChallengeSubmissionExample/dataset/split_npy/real_val.npy',
                      #'/data1/cby/py_project/FaceForensics/classification/dataset/train_test_npy/test_real_paths.npy'
                      ]
# '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/c23/fake_30frames_FF_val.npy',
# '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/c40/fake_30frames_FF_val.npy',
val_fake_paths_npy = ['/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/Celeb-DF-v1_mtcnn/new/fake_30frames_test.npy',
                      '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/DFDC_original/new/fake_30frames_test.npy',
                      '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/UADFV/new/fake_30frames_test.npy',
                      '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/DFD/c23/fake_30frames_test.npy',
                      '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/DFD/c40/fake_30frames_test.npy',
                      '/data1/cby/py_project/DeeperForensicsChallengeSubmissionExample/dataset/split_npy/fake_val.npy',
                      #'/data1/cby/py_project/FaceForensics/classification/dataset/train_test_npy/test_fake_paths.npy'
                      ]
def eval_model(epoch, is_save=True):
    """Run one evaluation pass over ``eval_loader``; returns the mean CE loss.

    Relies on module-level globals: model, eval_loader, optimizer, scheduler,
    device_id, LOG_FREQ and train_logger. When ``is_save`` is True the epoch
    metrics are appended to the training log.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    acc_score = AverageMeter()
    model.eval()
    num_steps = len(eval_loader)
    print(f'total batches: {num_steps}')
    end = time.time()
    # Plain cross-entropy here, even though training uses label smoothing.
    eval_criterion = nn.CrossEntropyLoss()
    with torch.no_grad():
        for i, (XI, label) in enumerate(eval_loader):
            x = Variable(XI.cuda(device_id))
            # label = Variable(torch.LongTensor(label).cuda(device_id))
            label = Variable(label.cuda(device_id))
            # Forward pass: Compute predicted y by passing x to the model
            output = model(x)
            # Compute and print loss
            loss = eval_criterion(output, label)
            losses.update(loss.data.item(), x.size(0))
            # update metrics: softmax probabilities -> argmax prediction
            output = nn.Softmax(dim=1)(output)
            confs, predicts = torch.max(output.detach(), dim=1)
            acc_score.update(calculate_metrics(predicts.cpu(), label.cpu()), 1)
            lr = optimizer.param_groups[0]['lr']
            batch_time.update(time.time() - end)
            end = time.time()
            if i % LOG_FREQ == 0:
                print(f'{epoch} [{i}/{num_steps}]\t'
                      f'time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      f'loss {losses.val:.4f} ({losses.avg:.4f})\t'
                      f'acc {acc_score.val:.4f} ({acc_score.avg:.4f})\t'
                      f'lr {lr:.8f}')
    print(f' * Eval loss {losses.avg:.4f}\t'f'acc({acc_score.avg:.4f})\t'f'total time {batch_time.sum}')
    if is_save:
        train_logger.log(phase="eval", values={
            'epoch': epoch,
            'loss': format(losses.avg, '.4f'),
            'acc': format(acc_score.avg, '.4f'),
            'lr': optimizer.param_groups[0]['lr']
        })
    # NOTE(review): scheduler.step() also runs at the end of train_model, so
    # the LR schedule advances twice per epoch — confirm this is intended.
    scheduler.step()
    return losses.avg
def train_model(epoch):
    """Train for one epoch over ``train_loader``; returns the last recorded batch loss.

    Relies on module-level globals: model, train_loader, criterion, optimizer,
    scheduler, device_id, LOG_FREQ and train_logger.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    acc_score = AverageMeter()
    model.train()
    num_steps = len(train_loader)
    print(f'total batches: {num_steps}')
    end = time.time()
    for i, (XI, label) in enumerate(train_loader):
        x = Variable(XI.cuda(device_id))
        # label = Variable(torch.LongTensor(label).cuda(device_id))
        label = Variable(label.cuda(device_id))
        # Forward pass: Compute predicted y by passing x to the model
        output = model(x)
        # Compute and print loss
        loss = criterion(output, label)
        # update metrics
        losses.update(loss.data.item(), x.size(0))
        confs, predicts = torch.max(output.detach(), dim=1)
        acc_score.update(calculate_metrics(predicts.cpu(), label.cpu()), 1)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        lr = optimizer.param_groups[0]['lr']
        batch_time.update(time.time() - end)
        end = time.time()
        if i % LOG_FREQ == 0:
            print(f'{epoch} [{i}/{num_steps}]\t'
                  f'time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  f'loss {losses.val:.4f} ({losses.avg:.4f})\t'
                  f'acc {acc_score.val:.4f} ({acc_score.avg:.4f})\t'
                  f'lr {lr:.8f}')
    print(f' * Train loss {losses.avg:.4f}\t'f'acc({acc_score.avg:.4f})\t'f'time {batch_time.sum}')
    train_logger.log(phase="train", values={
        'epoch': epoch,
        'loss': format(losses.avg, '.4f'),
        'acc': format(acc_score.avg, '.4f'),
        'lr': optimizer.param_groups[0]['lr']
    })
    scheduler.step()
    # NOTE(review): returns losses.val (most recent batch), while eval_model
    # returns losses.avg — confirm which the caller expects.
    return losses.val
if __name__ == '__main__':
    # Training entry point: EfficientNet-B1 with label smoothing, AdamW and
    # StepLR, fed by the custom my_augmentation pipeline (transforms=None).
    LOG_FREQ = 50
    batch_size = 128
    test_batch_size = 128
    device_id = 0
    lr = 1e-3
    epoch_start = 1
    num_epochs = epoch_start + 50
    model_name = 'efficientnet-b1'
    writeFile = '/data1/cby/temp/output_my_aug/logs/' + model_name
    store_name = '/data1/cby/temp/output_my_aug/weights/' + model_name
    if not os.path.isdir(store_name):
        os.makedirs(store_name)
    model_path = None
    # model_path = '/data1/cby/temp/output_my_aug/weights/efficientnet-b1/efn-b1_LS_9_loss_0.1610.pth'
    model = get_efficientnet(model_name=model_name)
    if model_path is not None:
        # model = torch.load(model_path)
        model.load_state_dict(torch.load(model_path, map_location='cpu'))
        print('Model found in {}'.format(model_path))
    else:
        print('No model found, initializing random model.')
    model = model.cuda(device_id)
    train_logger = Logger(model_name=writeFile, header=['epoch', 'loss', 'acc', 'lr'])
    # criterion = nn.CrossEntropyLoss()
    criterion = LabelSmoothing(smoothing=0.05).cuda(device_id)
    # optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
    # optimizer = optim.Adam(model.parameters(), lr=lr)
    optimizer = optim.AdamW(model.parameters(), lr=lr)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.9)
    is_train = True
    if is_train:
        xdl = DeeperForensicsDatasetNew(real_npys=train_real_paths_npy, fake_npys=train_fake_paths_npy,
                                        is_one_hot=True, transforms=None)
        # Downsampling keeps real/fake balanced within each epoch.
        train_loader = DataLoader(xdl, batch_size=batch_size, shuffle=False, num_workers=4,
                                  sampler=BalanceClassSampler(labels=xdl.get_labels(), mode="downsampling"))
        # train_loader = DataLoader(xdl, batch_size=batch_size, shuffle=True, num_workers=4)
        train_dataset_len = len(xdl)
        xdl_eval = DeeperForensicsDatasetNew(real_npys=val_real_paths_npy, fake_npys=val_fake_paths_npy,
                                             is_one_hot=False, transforms=None, data_type='val')
        eval_loader = DataLoader(xdl_eval, batch_size=test_batch_size, shuffle=False, num_workers=4)
        eval_dataset_len = len(xdl_eval)
        print('train_dataset_len:', train_dataset_len, 'eval_dataset_len:', eval_dataset_len)
        # When resuming, re-evaluate first to establish the best-loss baseline.
        min_loss = 100 if epoch_start == 1 else eval_model(epoch=epoch_start, is_save=False)
        for epoch in range(epoch_start, num_epochs):
            train_model(epoch)
            loss = eval_model(epoch)
            if loss < min_loss:
                min_loss = loss
                torch.save(model.state_dict(), '{}/efn-b1_LS_{}_loss_{:.4f}.pth'.format(store_name, epoch, loss))
            print('Current min loss:', min_loss)
        torch.save(model.state_dict(), '{}/efn-b1_LS_{}_loss_{:.4f}.pth'.format(store_name, 'last_50', loss))
    else:
        # Evaluation-only path on the held-out DeeperForensics test split.
        start = time.time()
        epoch_start = 1
        num_epochs = 1
        val_real_paths_npy = ['/data1/cby/py_project/DeeperForensicsChallengeSubmissionExample/dataset/split_npy/real_test.npy']
        val_fake_paths_npy = ['/data1/cby/py_project/DeeperForensicsChallengeSubmissionExample/dataset/split_npy/fake_test.npy']
        xdl_test = DeeperForensicsDatasetNew(real_npys=val_real_paths_npy, fake_npys=val_fake_paths_npy,
                                             is_one_hot=False, transforms=None, data_type='test')
        eval_loader = DataLoader(xdl_test, batch_size=test_batch_size, shuffle=False, num_workers=4)
        test_dataset_len = len(xdl_test)
        print('test_dataset_len:', test_dataset_len)
        eval_model(epoch=0, is_save=False)
        print('Total time:', time.time() - start)
| 12,235 | 51.741379 | 134 | py |
DeeperForensicsChallengeSolution | DeeperForensicsChallengeSolution-master/train/train_add_data.py | import sys
sys.path.append('..')
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
import time
from model.models import get_efficientnet
from dataset.dataset import DeeperForensicsDataset, get_train_transforms, get_valid_transforms, DeeperForensicsDatasetNew
from loss.losses import LabelSmoothing
from catalyst.data.sampler import BalanceClassSampler
from utils.utils import AverageMeter, calculate_metrics, Logger
# Cached face-crop path lists (.npy) merged into train/val splits.
# Sources: FF++ c23/c40, Celeb-DF v1, DFDC, UADFV, DFD and DeeperForensics.
train_real_paths_npy = ['/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/c23/real_30frames_FF_train.npy',
                        '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/c40/real_30frames_FF_train.npy',
                        '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/Celeb-DF-v1_mtcnn/new/real_60frames_train.npy',
                        '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/DFDC_original/new/real_120frames_train.npy',
                        '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/UADFV/new/real_30frames_train.npy',
                        '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/DFD/c23/real_240frames_train.npy',
                        '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/DFD/c40/real_240frames_train.npy',
                        '/data1/cby/py_project/DeeperForensicsChallengeSubmissionExample/dataset/split_npy/real_train.npy',
                        #'/data1/cby/py_project/FaceForensics/classification/dataset/train_test_npy/train_real_paths.npy',
                        ]
train_fake_paths_npy = ['/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/c23/fake_30frames_FF_train.npy',
                        '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/c40/fake_30frames_FF_train.npy',
                        '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/Celeb-DF-v1_mtcnn/new/fake_30frames_train.npy',
                        '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/DFDC_original/new/fake_30frames_train.npy',
                        '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/UADFV/new/fake_30frames_train.npy',
                        '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/DFD/c23/fake_30frames_train.npy',
                        '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/DFD/c40/fake_30frames_train.npy',
                        '/data1/cby/py_project/DeeperForensicsChallengeSubmissionExample/dataset/split_npy/fake_train.npy',
                        #'/data1/cby/py_project/FaceForensics/classification/dataset/train_test_npy/train_fake_paths.npy'
                        ]
# FF++ validation lists kept for reference but excluded from the val split:
# '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/c23/real_30frames_FF_val.npy',
# '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/c40/real_30frames_FF_val.npy',
val_real_paths_npy = ['/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/Celeb-DF-v1_mtcnn/new/real_30frames_test.npy',
                      '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/DFDC_original/new/real_30frames_test.npy',
                      '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/UADFV/new/real_30frames_test.npy',
                      '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/DFD/c23/real_30frames_test.npy',
                      '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/DFD/c40/real_30frames_test.npy',
                      '/data1/cby/py_project/DeeperForensicsChallengeSubmissionExample/dataset/split_npy/real_val.npy',
                      #'/data1/cby/py_project/FaceForensics/classification/dataset/train_test_npy/test_real_paths.npy'
                      ]
# '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/c23/fake_30frames_FF_val.npy',
# '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/c40/fake_30frames_FF_val.npy',
val_fake_paths_npy = ['/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/Celeb-DF-v1_mtcnn/new/fake_30frames_test.npy',
                      '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/DFDC_original/new/fake_30frames_test.npy',
                      '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/UADFV/new/fake_30frames_test.npy',
                      '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/DFD/c23/fake_30frames_test.npy',
                      '/data1/cby/py_project/FaceForensics/dataset/splits/images_npy/DFD/c40/fake_30frames_test.npy',
                      '/data1/cby/py_project/DeeperForensicsChallengeSubmissionExample/dataset/split_npy/fake_val.npy',
                      #'/data1/cby/py_project/FaceForensics/classification/dataset/train_test_npy/test_fake_paths.npy'
                      ]
def eval_model(epoch, is_save=True):
    """Run one evaluation pass over ``eval_loader``; returns the mean CE loss.

    Relies on module-level globals: model, eval_loader, optimizer, scheduler,
    device_id, LOG_FREQ and train_logger. When ``is_save`` is True the epoch
    metrics are appended to the training log.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    acc_score = AverageMeter()
    model.eval()
    num_steps = len(eval_loader)
    print(f'total batches: {num_steps}')
    end = time.time()
    # Plain cross-entropy here, even though training uses label smoothing.
    eval_criterion = nn.CrossEntropyLoss()
    with torch.no_grad():
        for i, (XI, label) in enumerate(eval_loader):
            x = Variable(XI.cuda(device_id))
            # label = Variable(torch.LongTensor(label).cuda(device_id))
            label = Variable(label.cuda(device_id))
            # Forward pass: Compute predicted y by passing x to the model
            output = model(x)
            # Compute and print loss
            loss = eval_criterion(output, label)
            losses.update(loss.data.item(), x.size(0))
            # update metrics: softmax probabilities -> argmax prediction
            output = nn.Softmax(dim=1)(output)
            confs, predicts = torch.max(output.detach(), dim=1)
            acc_score.update(calculate_metrics(predicts.cpu(), label.cpu()), 1)
            lr = optimizer.param_groups[0]['lr']
            batch_time.update(time.time() - end)
            end = time.time()
            if i % LOG_FREQ == 0:
                print(f'{epoch} [{i}/{num_steps}]\t'
                      f'time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      f'loss {losses.val:.4f} ({losses.avg:.4f})\t'
                      f'acc {acc_score.val:.4f} ({acc_score.avg:.4f})\t'
                      f'lr {lr:.8f}')
    print(f' * Eval loss {losses.avg:.4f}\t'f'acc({acc_score.avg:.4f})')
    if is_save:
        train_logger.log(phase="eval", values={
            'epoch': epoch,
            'loss': format(losses.avg, '.4f'),
            'acc': format(acc_score.avg, '.4f'),
            'lr': optimizer.param_groups[0]['lr']
        })
    # NOTE(review): scheduler.step() also runs at the end of train_model, so
    # the LR schedule advances twice per epoch — confirm this is intended.
    scheduler.step()
    return losses.avg
def train_model(epoch):
    """Run one training epoch over the global ``train_loader``.

    Relies on module-level globals: ``model``, ``train_loader``, ``criterion``,
    ``optimizer``, ``scheduler``, ``train_logger``, ``device_id``, ``LOG_FREQ``.

    Args:
        epoch: epoch index, used only for logging.

    Returns:
        Loss of the LAST batch (``losses.val``), not the epoch average —
        note this differs from eval_model(), which returns ``losses.avg``.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    acc_score = AverageMeter()
    model.train()
    num_steps = len(train_loader)
    print(f'total batches: {num_steps}')
    end = time.time()
    for i, (XI, label) in enumerate(train_loader):
        x = Variable(XI.cuda(device_id))
        # label = Variable(torch.LongTensor(label).cuda(device_id))
        label = Variable(label.cuda(device_id))
        # Forward pass: Compute predicted y by passing x to the model
        output = model(x)
        # Compute and print loss
        loss = criterion(output, label)
        # update metrics
        losses.update(loss.data.item(), x.size(0))
        confs, predicts = torch.max(output.detach(), dim=1)
        acc_score.update(calculate_metrics(predicts.cpu(), label.cpu()), 1)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        lr = optimizer.param_groups[0]['lr']
        batch_time.update(time.time() - end)
        end = time.time()
        if i % LOG_FREQ == 0:
            print(f'{epoch} [{i}/{num_steps}]\t'
                  f'time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  f'loss {losses.val:.4f} ({losses.avg:.4f})\t'
                  f'acc {acc_score.val:.4f} ({acc_score.avg:.4f})\t'
                  f'lr {lr:.8f}')
    print(f' * Train loss {losses.avg:.4f}\t'f'acc({acc_score.avg:.4f})')
    train_logger.log(phase="train", values={
        'epoch': epoch,
        'loss': format(losses.avg, '.4f'),
        'acc': format(acc_score.avg, '.4f'),
        'lr': optimizer.param_groups[0]['lr']
    })
    # NOTE(review): eval_model() also steps the scheduler each epoch.
    scheduler.step()
    return losses.val
if __name__ == '__main__':
    # ---- Hyperparameters / run configuration ----
    LOG_FREQ = 100
    batch_size = 64
    test_batch_size = 128
    device_id = 2
    lr = 1e-3
    epoch_start = 1
    num_epochs = epoch_start + 50
    model_name = 'efficientnet-b3'
    # Output locations for TensorBoard logs and checkpoints.
    writeFile = '/data1/cby/temp/output_3/logs/' + model_name
    store_name = '/data1/cby/temp/output_3/weights/' + model_name
    if not os.path.isdir(store_name):
        os.makedirs(store_name)
    # model_path = None
    # Set model_path to resume from a checkpoint; None trains from scratch.
    model_path = '/data1/cby/temp/output_3/weights/efficientnet-b3/efn-b3_LS_24_loss_0.1883.pth'
    model = get_efficientnet(model_name=model_name)
    if model_path is not None:
        # model = torch.load(model_path)
        model.load_state_dict(torch.load(model_path, map_location='cpu'))
        print('Model found in {}'.format(model_path))
    else:
        print('No model found, initializing random model.')
    model = model.cuda(device_id)
    train_logger = Logger(model_name=writeFile, header=['epoch', 'loss', 'acc', 'lr'])
    # criterion = nn.CrossEntropyLoss()
    # Label smoothing for training; eval_model uses its own plain CE loss.
    criterion = LabelSmoothing(smoothing=0.05).cuda(device_id)
    # optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
    # optimizer = optim.Adam(model.parameters(), lr=lr)
    optimizer = optim.AdamW(model.parameters(), lr=lr)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.9)
    # Toggle between training (True) and test-set evaluation (False).
    is_train = False
    if is_train:
        xdl = DeeperForensicsDatasetNew(real_npys=train_real_paths_npy, fake_npys=train_fake_paths_npy,
                                        is_one_hot=True, transforms=get_train_transforms(size=300))
        # Downsampling sampler balances real/fake classes per epoch.
        train_loader = DataLoader(xdl, batch_size=batch_size, shuffle=False, num_workers=4,
                                  sampler=BalanceClassSampler(labels=xdl.get_labels(), mode="downsampling"))
        # train_loader = DataLoader(xdl, batch_size=batch_size, shuffle=True, num_workers=4)
        train_dataset_len = len(xdl)
        xdl_eval = DeeperForensicsDatasetNew(real_npys=val_real_paths_npy, fake_npys=val_fake_paths_npy,
                                             is_one_hot=False, transforms=get_valid_transforms(size=300))
        eval_loader = DataLoader(xdl_eval, batch_size=test_batch_size, shuffle=False, num_workers=4)
        eval_dataset_len = len(xdl_eval)
        print('train_dataset_len:', train_dataset_len, 'eval_dataset_len:', eval_dataset_len)
        # When resuming, seed min_loss from the current checkpoint's eval loss.
        min_loss = 100 if epoch_start == 1 else eval_model(epoch=epoch_start, is_save=False)
        for epoch in range(epoch_start, num_epochs):
            train_model(epoch)
            loss = eval_model(epoch)
            if loss < min_loss:
                min_loss = loss
                # NOTE(review): filename prefix says 'efn-b5' but model_name is
                # efficientnet-b3 — confirm the prefix is just historical.
                torch.save(model.state_dict(), '{}/efn-b5_LS_{}_loss_{:.4f}.pth'.format(store_name, epoch, loss))
                print('Current min loss:', min_loss)
        torch.save(model.state_dict(), '{}/efn-b5_LS_{}_loss_{:.4f}.pth'.format(store_name, 'last_50', loss))
    else:
        start = time.time()
        epoch_start = 1
        num_epochs = 1
        xdl_test = DeeperForensicsDataset(data_type='test', transforms=get_valid_transforms(size=300), is_one_hot=False)
        eval_loader = DataLoader(xdl_test, batch_size=test_batch_size, shuffle=False, num_workers=4)
        test_dataset_len = len(xdl_test)
        print('test_dataset_len:', test_dataset_len)
        eval_model(epoch=0, is_save=False)
        print('Total time:', time.time() - start)
| 11,868 | 50.829694 | 134 | py |
DeeperForensicsChallengeSolution | DeeperForensicsChallengeSolution-master/train/train.py | import sys
sys.path.append('..')
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import *
import time
from model.models import get_efficientnet
from dataset.dataset import DeeperForensicsDataset, get_train_transforms, get_valid_transforms
from loss.losses import LabelSmoothing
from catalyst.data.sampler import BalanceClassSampler
from utils.utils import AverageMeter, calculate_metrics, Logger
def eval_model(epoch, is_save=True):
    """Run one full evaluation pass over the global ``eval_loader``.

    Relies on module-level globals: ``model``, ``eval_loader``, ``criterion``,
    ``optimizer``, ``scheduler``, ``train_logger``, ``device_id``, ``LOG_FREQ``.

    Args:
        epoch: epoch index, used only for logging.
        is_save: when True, write the epoch metrics to ``train_logger``.

    Returns:
        Average loss over the evaluation set.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    acc_score = AverageMeter()
    model.eval()
    num_steps = len(eval_loader)
    print(f'total batches: {num_steps}')
    end = time.time()
    with torch.no_grad():
        for i, (XI, label) in enumerate(eval_loader):
            x = Variable(XI.cuda(device_id))
            # label = Variable(torch.LongTensor(label).cuda(device_id))
            label = Variable(label.cuda(device_id))
            # Forward pass: Compute predicted y by passing x to the model
            output = model(x)
            # Compute and print loss
            # Uses the global training criterion; LabelSmoothing falls back to
            # plain cross entropy when the module is in eval() mode.
            loss = criterion(output, label)
            losses.update(loss.data.item(), x.size(0))
            # update metrics
            output = nn.Softmax(dim=1)(output)
            confs, predicts = torch.max(output.detach(), dim=1)
            acc_score.update(calculate_metrics(predicts.cpu(), label.cpu()), 1)
            lr = optimizer.param_groups[0]['lr']
            batch_time.update(time.time() - end)
            end = time.time()
            if i % LOG_FREQ == 0:
                print(f'{epoch} [{i}/{num_steps}]\t'
                      f'time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      f'loss {losses.val:.4f} ({losses.avg:.4f})\t'
                      f'acc {acc_score.val:.4f} ({acc_score.avg:.4f})\t'
                      f'lr {lr:.8f}')
    print(f' * Eval loss {losses.avg:.4f}\t'f'acc({acc_score.avg:.4f})')
    if is_save:
        train_logger.log(phase="eval", values={
            'epoch': epoch,
            'loss': format(losses.avg, '.4f'),
            'acc': format(acc_score.avg, '.4f'),
            'lr': optimizer.param_groups[0]['lr']
        })
    # NOTE(review): train_model() also calls scheduler.step(), so the LR
    # scheduler advances twice per epoch — confirm this is intentional.
    scheduler.step()
    return losses.avg
def train_model(epoch):
    """Run one training epoch over the global ``train_loader``.

    Relies on module-level globals: ``model``, ``train_loader``, ``criterion``,
    ``optimizer``, ``scheduler``, ``train_logger``, ``device_id``, ``LOG_FREQ``.

    Args:
        epoch: epoch index, used only for logging.

    Returns:
        Loss of the LAST batch (``losses.val``), not the epoch average —
        note this differs from eval_model(), which returns ``losses.avg``.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    acc_score = AverageMeter()
    model.train()
    num_steps = len(train_loader)
    print(f'total batches: {num_steps}')
    end = time.time()
    for i, (XI, label) in enumerate(train_loader):
        x = Variable(XI.cuda(device_id))
        # label = Variable(torch.LongTensor(label).cuda(device_id))
        label = Variable(label.cuda(device_id))
        # Forward pass: Compute predicted y by passing x to the model
        output = model(x)
        # Compute and print loss
        loss = criterion(output, label)
        # update metrics
        losses.update(loss.data.item(), x.size(0))
        confs, predicts = torch.max(output.detach(), dim=1)
        acc_score.update(calculate_metrics(predicts.cpu(), label.cpu()), 1)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        lr = optimizer.param_groups[0]['lr']
        batch_time.update(time.time() - end)
        end = time.time()
        if i % LOG_FREQ == 0:
            print(f'{epoch} [{i}/{num_steps}]\t'
                  f'time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  f'loss {losses.val:.4f} ({losses.avg:.4f})\t'
                  f'acc {acc_score.val:.4f} ({acc_score.avg:.4f})\t'
                  f'lr {lr:.8f}')
    print(f' * Train loss {losses.avg:.4f}\t'f'acc({acc_score.avg:.4f})')
    train_logger.log(phase="train", values={
        'epoch': epoch,
        'loss': format(losses.avg, '.4f'),
        'acc': format(acc_score.avg, '.4f'),
        'lr': optimizer.param_groups[0]['lr']
    })
    # NOTE(review): eval_model() also steps the scheduler each epoch.
    scheduler.step()
    return losses.val
if __name__ == '__main__':
    # ---- Hyperparameters / run configuration ----
    LOG_FREQ = 50
    batch_size = 128
    test_batch_size = 128
    device_id = 0
    lr = 0.0001
    epoch_start = 1
    num_epochs = epoch_start + 20
    model_name = 'efficientnet-b0'
    # Output locations for TensorBoard logs and checkpoints.
    writeFile = '/data1/cby/temp/output_2/logs/' + model_name
    store_name = '/data1/cby/temp/output_2/weights/' + model_name
    if not os.path.isdir(store_name):
        os.makedirs(store_name)
    # model_path = None
    # Set model_path to resume from a checkpoint; None trains from scratch.
    model_path = '/data1/cby/temp/output_3/weights/efficientnet-b0/efn_6_loss_0.2318.pth'
    model = get_efficientnet(model_name=model_name)
    if model_path is not None:
        # model = torch.load(model_path)
        model.load_state_dict(torch.load(model_path, map_location='cpu'))
        print('Model found in {}'.format(model_path))
    else:
        print('No model found, initializing random model.')
    model = model.cuda(device_id)
    train_logger = Logger(model_name=writeFile, header=['epoch', 'loss', 'acc', 'lr'])
    criterion = nn.CrossEntropyLoss()
    # criterion = LabelSmoothing(smoothing=0.05).cuda(device_id)
    # optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
    # optimizer = optim.Adam(model.parameters(), lr=lr)
    optimizer = optim.AdamW(model.parameters(), lr=lr)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.9)
    # Toggle between training (True) and validation-set evaluation (False).
    is_train = False
    if is_train:
        xdl = DeeperForensicsDataset(is_one_hot=False, data_type='train', transforms=get_train_transforms())
        # Downsampling sampler balances real/fake classes per epoch.
        train_loader = DataLoader(xdl, batch_size=batch_size, shuffle=False, num_workers=4,
                                  sampler=BalanceClassSampler(labels=xdl.get_labels(), mode="downsampling"))
        # train_loader = DataLoader(xdl, batch_size=batch_size, shuffle=True, num_workers=4)
        train_dataset_len = len(xdl)
        xdl_eval = DeeperForensicsDataset(is_one_hot=False, data_type='val', transforms=get_valid_transforms())
        eval_loader = DataLoader(xdl_eval, batch_size=test_batch_size, shuffle=False, num_workers=4)
        eval_dataset_len = len(xdl_eval)
        print('train_dataset_len:', train_dataset_len, 'eval_dataset_len:', eval_dataset_len)
        # When resuming, seed min_loss from the current checkpoint's eval loss.
        min_loss = 100 if epoch_start == 1 else eval_model(epoch=epoch_start, is_save=False)
        for epoch in range(epoch_start, num_epochs):
            train_model(epoch)
            loss = eval_model(epoch)
            if loss < min_loss:
                min_loss = loss
                torch.save(model.state_dict(), '{}/efn_{}_loss_{:.4f}.pth'.format(store_name, epoch, loss))
                print('Current min loss:', min_loss)
        torch.save(model.state_dict(), '{}/efn_{}_loss_{:.4f}.pth'.format(store_name, 'last_20', loss))
    else:
        start = time.time()
        epoch_start = 1
        num_epochs = 1
        xdl_test = DeeperForensicsDataset(data_type='val', transforms=get_valid_transforms(), is_one_hot=False)
        eval_loader = DataLoader(xdl_test, batch_size=test_batch_size, shuffle=False, num_workers=4)
        test_dataset_len = len(xdl_test)
        print('test_dataset_len:', test_dataset_len)
        eval_model(epoch=0, is_save=False)
        print('Total time:', time.time() - start)
| 7,130 | 38.181319 | 111 | py |
DeeperForensicsChallengeSolution | DeeperForensicsChallengeSolution-master/loss/losses.py | import torch
import torch.nn as nn
class LabelSmoothing(nn.Module):
    """Label-smoothed cross-entropy loss.

    In training mode ``target`` is expected to be a one-hot (or soft) float
    distribution over classes; in eval mode ``target`` holds class indices and
    the loss falls back to plain cross entropy.
    """

    def __init__(self, smoothing=0.05):
        super(LabelSmoothing, self).__init__()
        # Weight on the true-label term vs. the uniform smoothing term.
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing

    def forward(self, x, target):
        if not self.training:
            # Eval path: targets are class indices, use the standard loss.
            return torch.nn.functional.cross_entropy(x, target)
        log_probs = torch.nn.functional.log_softmax(x.float(), dim=-1)
        # Negative log-likelihood against the (one-hot) target distribution.
        nll = -(log_probs * target.float()).sum(-1)
        # Uniform-distribution term: mean over classes of -log p.
        uniform = -log_probs.mean(dim=-1)
        return (self.confidence * nll + self.smoothing * uniform).mean()
| 743 | 27.615385 | 76 | py |
DeeperForensicsChallengeSolution | DeeperForensicsChallengeSolution-master/utils/utils.py | import tensorboardX
from sklearn.metrics import log_loss, accuracy_score, precision_score, average_precision_score, roc_auc_score, recall_score
import torch
class Logger(object):
    """Thin TensorBoard wrapper that logs one row of scalar metrics per epoch.

    ``header`` is a list of column names; ``header[0]`` is the epoch column and
    every remaining column is written as a scalar under ``<phase>/<col>``.
    """

    def __init__(self, model_name, header):
        self.header = header
        # model_name doubles as the TensorBoard log directory.
        self.writer = tensorboardX.SummaryWriter(model_name)

    def __del__(self):
        # Bug fix: this was misspelled ``__del`` (name-mangled to
        # ``_Logger__del``), so it was never invoked and the SummaryWriter
        # was never closed. Guard with getattr in case __init__ failed.
        writer = getattr(self, 'writer', None)
        if writer is not None:
            writer.close()

    def log(self, phase, values):
        """Write one epoch's scalars, e.g. ``log("train", {"epoch": 1, "loss": 0.3, ...})``."""
        epoch = values['epoch']
        for col in self.header[1:]:
            self.writer.add_scalar(phase + "/" + col, float(values[col]), int(epoch))
class AverageMeter:
    """Running-metric tracker: keeps the latest value, total, count and mean."""

    def __init__(self) -> None:
        self.reset()

    def reset(self) -> None:
        """Clear all accumulated statistics."""
        self.val, self.avg, self.sum, self.count = 0.0, 0.0, 0.0, 0

    def update(self, val: float, n: int = 1) -> None:
        """Fold in ``n`` observations of ``val`` and refresh the mean."""
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count
def calculate_metrics(outputs, targets):
    """Return the accuracy of predicted class indices against the targets.

    ``targets`` may be one-hot / soft labels (2-D); it is collapsed to hard
    class indices with an argmax before scoring. Both tensors must be on CPU.
    """
    if len(targets.data.numpy().shape) > 1:
        # One-hot / soft labels: reduce to hard class indices.
        targets = torch.max(targets.detach(), dim=1)[1]
    return accuracy_score(outputs.detach().numpy(), targets.detach().numpy())
DeeperForensicsChallengeSolution | DeeperForensicsChallengeSolution-master/data/detect_face.py | """ Tensorflow implementation of the face detection / alignment algorithm found at
https://github.com/kpzhang93/MTCNN_face_detection_alignment
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import string_types, iteritems
import numpy as np
import tensorflow as tf
#from math import floor
import cv2
import os
def layer(op):
    """Decorator for composable network layers.

    Wraps a Network method so that it consumes the current terminal node(s)
    as its input, registers its output in the layer lookup table under a
    unique name, re-feeds that output as the next layer's input, and returns
    ``self`` so layer calls can be chained fluently.
    """
    def layer_decorated(self, *args, **kwargs):
        # Automatically set a name if not provided.
        name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
        # Figure out the layer inputs.
        if len(self.terminals) == 0:
            raise RuntimeError('No input variables found for layer %s.' % name)
        elif len(self.terminals) == 1:
            layer_input = self.terminals[0]
        else:
            # Multiple pending terminals are passed through as a list.
            layer_input = list(self.terminals)
        # Perform the operation and get the output.
        layer_output = op(self, layer_input, *args, **kwargs)
        # Add to layer LUT.
        self.layers[name] = layer_output
        # This output is now the input for the next layer.
        self.feed(layer_output)
        # Return self for chained calls.
        return self
    return layer_decorated
class Network(object):
    """Base class for TF1 graph-building networks (MTCNN P/R/O nets).

    Subclasses implement ``setup()`` using the chainable ``@layer`` methods
    below; weights are loaded afterwards from a numpy-serialized dict via
    ``load()``.
    """
    def __init__(self, inputs, trainable=True):
        # The input nodes for this network
        self.inputs = inputs
        # The current list of terminal nodes
        self.terminals = []
        # Mapping from layer names to layers
        self.layers = dict(inputs)
        # If true, the resulting variables are set as trainable
        self.trainable = trainable
        self.setup()
    def setup(self):
        """Construct the network. """
        raise NotImplementedError('Must be implemented by the subclass.')
    def load(self, data_path, session, ignore_missing=False):
        """Load network weights.
        data_path: The path to the numpy-serialized network weights
        session: The current TensorFlow session
        ignore_missing: If true, serialized weights for missing layers are ignored.
        """
        data_dict = np.load(data_path, encoding='latin1', allow_pickle=True).item() #pylint: disable=no-member
        for op_name in data_dict:
            # Variables were created during setup(); reuse the scopes to assign.
            with tf.variable_scope(op_name, reuse=True):
                for param_name, data in iteritems(data_dict[op_name]):
                    try:
                        var = tf.get_variable(param_name)
                        session.run(var.assign(data))
                    except ValueError:
                        if not ignore_missing:
                            raise
    def feed(self, *args):
        """Set the input(s) for the next operation by replacing the terminal nodes.
        The arguments can be either layer names or the actual layers.
        """
        assert len(args) != 0
        self.terminals = []
        for fed_layer in args:
            if isinstance(fed_layer, string_types):
                try:
                    fed_layer = self.layers[fed_layer]
                except KeyError:
                    raise KeyError('Unknown layer name fed: %s' % fed_layer)
            self.terminals.append(fed_layer)
        return self
    def get_output(self):
        """Returns the current network output."""
        return self.terminals[-1]
    def get_unique_name(self, prefix):
        """Returns an index-suffixed unique name for the given prefix.
        This is used for auto-generating layer names based on the type-prefix.
        """
        ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
        return '%s_%d' % (prefix, ident)
    def make_var(self, name, shape):
        """Creates a new TensorFlow variable."""
        return tf.get_variable(name, shape, trainable=self.trainable)
    def validate_padding(self, padding):
        """Verifies that the padding is one of the supported ones."""
        assert padding in ('SAME', 'VALID')
    @layer
    def conv(self,
             inp,
             k_h,
             k_w,
             c_o,
             s_h,
             s_w,
             name,
             relu=True,
             padding='SAME',
             group=1,
             biased=True):
        """2-D convolution layer (k_h x k_w kernel, c_o output channels, s_h/s_w strides)."""
        # Verify that the padding is acceptable
        self.validate_padding(padding)
        # Get the number of channels in the input
        c_i = int(inp.get_shape()[-1])
        # Verify that the grouping parameter is valid
        assert c_i % group == 0
        assert c_o % group == 0
        # Convolution for a given input and kernel
        convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
        with tf.variable_scope(name) as scope:
            kernel = self.make_var('weights', shape=[k_h, k_w, c_i // group, c_o])
            # This is the common-case. Convolve the input without any further complications.
            output = convolve(inp, kernel)
            # Add the biases
            if biased:
                biases = self.make_var('biases', [c_o])
                output = tf.nn.bias_add(output, biases)
            if relu:
                # ReLU non-linearity
                output = tf.nn.relu(output, name=scope.name)
            return output
    @layer
    def prelu(self, inp, name):
        """Parametric ReLU with one learned slope per channel."""
        with tf.variable_scope(name):
            i = int(inp.get_shape()[-1])
            alpha = self.make_var('alpha', shape=(i,))
            output = tf.nn.relu(inp) + tf.multiply(alpha, -tf.nn.relu(-inp))
        return output
    @layer
    def max_pool(self, inp, k_h, k_w, s_h, s_w, name, padding='SAME'):
        """Max pooling layer (k_h x k_w window, s_h/s_w strides)."""
        self.validate_padding(padding)
        return tf.nn.max_pool(inp,
                              ksize=[1, k_h, k_w, 1],
                              strides=[1, s_h, s_w, 1],
                              padding=padding,
                              name=name)
    @layer
    def fc(self, inp, num_out, name, relu=True):
        """Fully-connected layer; flattens a spatial input first if needed."""
        with tf.variable_scope(name):
            input_shape = inp.get_shape()
            if input_shape.ndims == 4:
                # The input is spatial. Vectorize it first.
                dim = 1
                for d in input_shape[1:].as_list():
                    dim *= int(d)
                feed_in = tf.reshape(inp, [-1, dim])
            else:
                feed_in, dim = (inp, input_shape[-1].value)
            weights = self.make_var('weights', shape=[dim, num_out])
            biases = self.make_var('biases', [num_out])
            op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
            fc = op(feed_in, weights, biases, name=name)
            return fc
    """
    Multi dimensional softmax,
    refer to https://github.com/tensorflow/tensorflow/issues/210
    compute softmax along the dimension of target
    the native softmax only supports batch_size x dimension
    """
    @layer
    def softmax(self, target, axis, name=None):
        # Subtract the per-axis max first for numerical stability.
        max_axis = tf.reduce_max(target, axis, keepdims=True)
        target_exp = tf.exp(target-max_axis)
        normalize = tf.reduce_sum(target_exp, axis, keepdims=True)
        softmax = tf.div(target_exp, normalize, name)
        return softmax
class PNet(Network):
    """MTCNN stage-1 proposal network (fully convolutional P-Net).

    Emits a face-probability map ('prob1') and a bounding-box regression
    map ('conv4-2') for every sliding 12x12 window.
    """
    def setup(self):
        (self.feed('data') #pylint: disable=no-value-for-parameter, no-member
             .conv(3, 3, 10, 1, 1, padding='VALID', relu=False, name='conv1')
             .prelu(name='PReLU1')
             .max_pool(2, 2, 2, 2, name='pool1')
             .conv(3, 3, 16, 1, 1, padding='VALID', relu=False, name='conv2')
             .prelu(name='PReLU2')
             .conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv3')
             .prelu(name='PReLU3')
             .conv(1, 1, 2, 1, 1, relu=False, name='conv4-1')
             .softmax(3,name='prob1'))
        # Box-regression head branches off the shared trunk.
        (self.feed('PReLU3') #pylint: disable=no-value-for-parameter
             .conv(1, 1, 4, 1, 1, relu=False, name='conv4-2'))
class RNet(Network):
    """MTCNN stage-2 refinement network (R-Net) for 24x24 candidate crops.

    Emits a face probability ('prob1') and box regression ('conv5-2') per crop.
    """
    def setup(self):
        (self.feed('data') #pylint: disable=no-value-for-parameter, no-member
             .conv(3, 3, 28, 1, 1, padding='VALID', relu=False, name='conv1')
             .prelu(name='prelu1')
             .max_pool(3, 3, 2, 2, name='pool1')
             .conv(3, 3, 48, 1, 1, padding='VALID', relu=False, name='conv2')
             .prelu(name='prelu2')
             .max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
             .conv(2, 2, 64, 1, 1, padding='VALID', relu=False, name='conv3')
             .prelu(name='prelu3')
             .fc(128, relu=False, name='conv4')
             .prelu(name='prelu4')
             .fc(2, relu=False, name='conv5-1')
             .softmax(1,name='prob1'))
        # Box-regression head branches off the shared trunk.
        (self.feed('prelu4') #pylint: disable=no-value-for-parameter
             .fc(4, relu=False, name='conv5-2'))
class ONet(Network):
    """MTCNN stage-3 output network (O-Net) for 48x48 candidate crops.

    Emits a face probability ('prob1'), box regression ('conv6-2') and
    5-point facial landmarks ('conv6-3') per crop.
    """
    def setup(self):
        (self.feed('data') #pylint: disable=no-value-for-parameter, no-member
             .conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv1')
             .prelu(name='prelu1')
             .max_pool(3, 3, 2, 2, name='pool1')
             .conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv2')
             .prelu(name='prelu2')
             .max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
             .conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv3')
             .prelu(name='prelu3')
             .max_pool(2, 2, 2, 2, name='pool3')
             .conv(2, 2, 128, 1, 1, padding='VALID', relu=False, name='conv4')
             .prelu(name='prelu4')
             .fc(256, relu=False, name='conv5')
             .prelu(name='prelu5')
             .fc(2, relu=False, name='conv6-1')
             .softmax(1, name='prob1'))
        # Box-regression head.
        (self.feed('prelu5') #pylint: disable=no-value-for-parameter
             .fc(4, relu=False, name='conv6-2'))
        # Landmark head (5 (x, y) points = 10 outputs).
        (self.feed('prelu5') #pylint: disable=no-value-for-parameter
             .fc(10, relu=False, name='conv6-3'))
def create_mtcnn(sess, model_path):
    """Build the three MTCNN stage networks in ``sess`` and load their weights.

    model_path: directory containing det1.npy/det2.npy/det3.npy; when falsy,
    the directory of this source file is used.

    Returns (pnet, rnet, onet): callables mapping an image batch to the
    corresponding network outputs via ``sess.run``.
    """
    if not model_path:
        model_path,_ = os.path.split(os.path.realpath(__file__))
    with tf.variable_scope('pnet'):
        # P-Net is fully convolutional, so the spatial size is left open.
        data = tf.placeholder(tf.float32, (None,None,None,3), 'input')
        pnet = PNet({'data':data})
        pnet.load(os.path.join(model_path, 'det1.npy'), sess)
    with tf.variable_scope('rnet'):
        data = tf.placeholder(tf.float32, (None,24,24,3), 'input')
        rnet = RNet({'data':data})
        rnet.load(os.path.join(model_path, 'det2.npy'), sess)
    with tf.variable_scope('onet'):
        data = tf.placeholder(tf.float32, (None,48,48,3), 'input')
        onet = ONet({'data':data})
        onet.load(os.path.join(model_path, 'det3.npy'), sess)
    pnet_fun = lambda img : sess.run(('pnet/conv4-2/BiasAdd:0', 'pnet/prob1:0'), feed_dict={'pnet/input:0':img})
    rnet_fun = lambda img : sess.run(('rnet/conv5-2/conv5-2:0', 'rnet/prob1:0'), feed_dict={'rnet/input:0':img})
    onet_fun = lambda img : sess.run(('onet/conv6-2/conv6-2:0', 'onet/conv6-3/conv6-3:0', 'onet/prob1:0'), feed_dict={'onet/input:0':img})
    return pnet_fun, rnet_fun, onet_fun
def detect_face(img, minsize, pnet, rnet, onet, threshold, factor):
    """Detects faces in an image, and returns bounding boxes and points for them.

    img: input image
    minsize: minimum faces' size
    pnet, rnet, onet: caffemodel
    threshold: threshold=[th1, th2, th3], th1-3 are three steps's threshold
    factor: the factor used to create a scaling pyramid of face sizes to detect in the image.

    Returns (total_boxes, points): total_boxes is an (N, 5) array of
    [x1, y1, x2, y2, score]; points is a (10, N) array of landmark
    coordinates (5 x-values followed by 5 y-values per face).
    """
    factor_count=0
    total_boxes=np.empty((0,9))
    points=np.empty(0)
    h=img.shape[0]
    w=img.shape[1]
    minl=np.amin([h, w])
    m=12.0/minsize
    minl=minl*m
    # create scale pyramid
    scales=[]
    while minl>=12:
        scales += [m*np.power(factor, factor_count)]
        minl = minl*factor
        factor_count += 1
    # first stage: run P-Net on every pyramid scale and collect candidates
    for scale in scales:
        hs=int(np.ceil(h*scale))
        ws=int(np.ceil(w*scale))
        im_data = imresample(img, (hs, ws))
        # Normalize pixels to roughly [-1, 1] (127.5 offset, 1/128 scale).
        im_data = (im_data-127.5)*0.0078125
        img_x = np.expand_dims(im_data, 0)
        img_y = np.transpose(img_x, (0,2,1,3))
        out = pnet(img_y)
        out0 = np.transpose(out[0], (0,2,1,3))
        out1 = np.transpose(out[1], (0,2,1,3))
        boxes, _ = generateBoundingBox(out1[0,:,:,1].copy(), out0[0,:,:,:].copy(), scale, threshold[0])
        # inter-scale nms
        pick = nms(boxes.copy(), 0.5, 'Union')
        if boxes.size>0 and pick.size>0:
            boxes = boxes[pick,:]
            total_boxes = np.append(total_boxes, boxes, axis=0)
    numbox = total_boxes.shape[0]
    if numbox>0:
        # NMS across all scales, apply P-Net regression, square up the boxes.
        pick = nms(total_boxes.copy(), 0.7, 'Union')
        total_boxes = total_boxes[pick,:]
        regw = total_boxes[:,2]-total_boxes[:,0]
        regh = total_boxes[:,3]-total_boxes[:,1]
        qq1 = total_boxes[:,0]+total_boxes[:,5]*regw
        qq2 = total_boxes[:,1]+total_boxes[:,6]*regh
        qq3 = total_boxes[:,2]+total_boxes[:,7]*regw
        qq4 = total_boxes[:,3]+total_boxes[:,8]*regh
        total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:,4]]))
        total_boxes = rerec(total_boxes.copy())
        total_boxes[:,0:4] = np.fix(total_boxes[:,0:4]).astype(np.int32)
        dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
    numbox = total_boxes.shape[0]
    if numbox>0:
        # second stage: crop each candidate, resize to 24x24, score with R-Net
        tempimg = np.zeros((24,24,3,numbox))
        for k in range(0,numbox):
            tmp = np.zeros((int(tmph[k]),int(tmpw[k]),3))
            tmp[dy[k]-1:edy[k],dx[k]-1:edx[k],:] = img[y[k]-1:ey[k],x[k]-1:ex[k],:]
            if tmp.shape[0]>0 and tmp.shape[1]>0 or tmp.shape[0]==0 and tmp.shape[1]==0:
                tempimg[:,:,:,k] = imresample(tmp, (24, 24))
            else:
                # NOTE(review): np.empty() with no args raises TypeError —
                # this error path looks broken; confirm intended behavior.
                return np.empty()
        tempimg = (tempimg-127.5)*0.0078125
        tempimg1 = np.transpose(tempimg, (3,1,0,2))
        out = rnet(tempimg1)
        out0 = np.transpose(out[0])
        out1 = np.transpose(out[1])
        score = out1[1,:]
        ipass = np.where(score>threshold[1])
        total_boxes = np.hstack([total_boxes[ipass[0],0:4].copy(), np.expand_dims(score[ipass].copy(),1)])
        mv = out0[:,ipass[0]]
        if total_boxes.shape[0]>0:
            pick = nms(total_boxes, 0.7, 'Union')
            total_boxes = total_boxes[pick,:]
            total_boxes = bbreg(total_boxes.copy(), np.transpose(mv[:,pick]))
            total_boxes = rerec(total_boxes.copy())
    numbox = total_boxes.shape[0]
    if numbox>0:
        # third stage: 48x48 crops through O-Net for final scores + landmarks
        total_boxes = np.fix(total_boxes).astype(np.int32)
        dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
        tempimg = np.zeros((48,48,3,numbox))
        for k in range(0,numbox):
            tmp = np.zeros((int(tmph[k]),int(tmpw[k]),3))
            tmp[dy[k]-1:edy[k],dx[k]-1:edx[k],:] = img[y[k]-1:ey[k],x[k]-1:ex[k],:]
            if tmp.shape[0]>0 and tmp.shape[1]>0 or tmp.shape[0]==0 and tmp.shape[1]==0:
                tempimg[:,:,:,k] = imresample(tmp, (48, 48))
            else:
                # NOTE(review): same broken np.empty() error path as above.
                return np.empty()
        tempimg = (tempimg-127.5)*0.0078125
        tempimg1 = np.transpose(tempimg, (3,1,0,2))
        out = onet(tempimg1)
        out0 = np.transpose(out[0])
        out1 = np.transpose(out[1])
        out2 = np.transpose(out[2])
        score = out2[1,:]
        points = out1
        ipass = np.where(score>threshold[2])
        points = points[:,ipass[0]]
        total_boxes = np.hstack([total_boxes[ipass[0],0:4].copy(), np.expand_dims(score[ipass].copy(),1)])
        mv = out0[:,ipass[0]]
        # Map normalized landmark coordinates back to image space.
        w = total_boxes[:,2]-total_boxes[:,0]+1
        h = total_boxes[:,3]-total_boxes[:,1]+1
        points[0:5,:] = np.tile(w,(5, 1))*points[0:5,:] + np.tile(total_boxes[:,0],(5, 1))-1
        points[5:10,:] = np.tile(h,(5, 1))*points[5:10,:] + np.tile(total_boxes[:,1],(5, 1))-1
        if total_boxes.shape[0]>0:
            total_boxes = bbreg(total_boxes.copy(), np.transpose(mv))
            pick = nms(total_boxes.copy(), 0.7, 'Min')
            total_boxes = total_boxes[pick,:]
            points = points[:,pick]
    return total_boxes, points
def bulk_detect_face(images, detection_window_size_ratio, pnet, rnet, onet, threshold, factor):
    """Detects faces in a list of images
    images: list containing input images
    detection_window_size_ratio: ratio of minimum face size to smallest image dimension
    pnet, rnet, onet: caffemodel
    threshold: threshold=[th1 th2 th3], th1-3 are three steps's threshold [0-1]
    factor: the factor used to create a scaling pyramid of face sizes to detect in the image.

    Returns a list with one entry per input image: either None (no faces
    survived) or a (total_boxes, points) pair as in detect_face().
    """
    all_scales = [None] * len(images)
    images_with_boxes = [None] * len(images)
    for i in range(len(images)):
        images_with_boxes[i] = {'total_boxes': np.empty((0, 9))}
    # create scale pyramid
    for index, img in enumerate(images):
        all_scales[index] = []
        h = img.shape[0]
        w = img.shape[1]
        minsize = int(detection_window_size_ratio * np.minimum(w, h))
        factor_count = 0
        minl = np.amin([h, w])
        if minsize <= 12:
            minsize = 12
        m = 12.0 / minsize
        minl = minl * m
        while minl >= 12:
            all_scales[index].append(m * np.power(factor, factor_count))
            minl = minl * factor
            factor_count += 1
    # # # # # # # # # # # # #
    # first stage - fast proposal network (pnet) to obtain face candidates
    # # # # # # # # # # # # #
    # Group pyramid levels by resolution so images of the same size can be
    # batched through P-Net together.
    images_obj_per_resolution = {}
    # TODO: use some type of rounding to number module 8 to increase probability that pyramid images will have the same resolution across input images
    for index, scales in enumerate(all_scales):
        h = images[index].shape[0]
        w = images[index].shape[1]
        for scale in scales:
            hs = int(np.ceil(h * scale))
            ws = int(np.ceil(w * scale))
            if (ws, hs) not in images_obj_per_resolution:
                images_obj_per_resolution[(ws, hs)] = []
            im_data = imresample(images[index], (hs, ws))
            # Normalize pixels to roughly [-1, 1].
            im_data = (im_data - 127.5) * 0.0078125
            img_y = np.transpose(im_data, (1, 0, 2))  # caffe uses different dimensions ordering
            images_obj_per_resolution[(ws, hs)].append({'scale': scale, 'image': img_y, 'index': index})
    for resolution in images_obj_per_resolution:
        images_per_resolution = [i['image'] for i in images_obj_per_resolution[resolution]]
        outs = pnet(images_per_resolution)
        for index in range(len(outs[0])):
            scale = images_obj_per_resolution[resolution][index]['scale']
            image_index = images_obj_per_resolution[resolution][index]['index']
            out0 = np.transpose(outs[0][index], (1, 0, 2))
            out1 = np.transpose(outs[1][index], (1, 0, 2))
            boxes, _ = generateBoundingBox(out1[:, :, 1].copy(), out0[:, :, :].copy(), scale, threshold[0])
            # inter-scale nms
            pick = nms(boxes.copy(), 0.5, 'Union')
            if boxes.size > 0 and pick.size > 0:
                boxes = boxes[pick, :]
                images_with_boxes[image_index]['total_boxes'] = np.append(images_with_boxes[image_index]['total_boxes'],
                                                                          boxes,
                                                                          axis=0)
    # Per image: cross-scale NMS, apply P-Net regression, crop 24x24 patches.
    for index, image_obj in enumerate(images_with_boxes):
        numbox = image_obj['total_boxes'].shape[0]
        if numbox > 0:
            h = images[index].shape[0]
            w = images[index].shape[1]
            pick = nms(image_obj['total_boxes'].copy(), 0.7, 'Union')
            image_obj['total_boxes'] = image_obj['total_boxes'][pick, :]
            regw = image_obj['total_boxes'][:, 2] - image_obj['total_boxes'][:, 0]
            regh = image_obj['total_boxes'][:, 3] - image_obj['total_boxes'][:, 1]
            qq1 = image_obj['total_boxes'][:, 0] + image_obj['total_boxes'][:, 5] * regw
            qq2 = image_obj['total_boxes'][:, 1] + image_obj['total_boxes'][:, 6] * regh
            qq3 = image_obj['total_boxes'][:, 2] + image_obj['total_boxes'][:, 7] * regw
            qq4 = image_obj['total_boxes'][:, 3] + image_obj['total_boxes'][:, 8] * regh
            image_obj['total_boxes'] = np.transpose(np.vstack([qq1, qq2, qq3, qq4, image_obj['total_boxes'][:, 4]]))
            image_obj['total_boxes'] = rerec(image_obj['total_boxes'].copy())
            image_obj['total_boxes'][:, 0:4] = np.fix(image_obj['total_boxes'][:, 0:4]).astype(np.int32)
            dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(image_obj['total_boxes'].copy(), w, h)
            numbox = image_obj['total_boxes'].shape[0]
            tempimg = np.zeros((24, 24, 3, numbox))
            if numbox > 0:
                for k in range(0, numbox):
                    tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
                    tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = images[index][y[k] - 1:ey[k], x[k] - 1:ex[k], :]
                    if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
                        tempimg[:, :, :, k] = imresample(tmp, (24, 24))
                    else:
                        # NOTE(review): np.empty() with no args raises TypeError —
                        # this error path looks broken; confirm intended behavior.
                        return np.empty()
                tempimg = (tempimg - 127.5) * 0.0078125
                image_obj['rnet_input'] = np.transpose(tempimg, (3, 1, 0, 2))
    # # # # # # # # # # # # #
    # second stage - refinement of face candidates with rnet
    # # # # # # # # # # # # #
    # All images' crops are concatenated into one R-Net batch, then the
    # outputs are sliced back per image by crop count.
    bulk_rnet_input = np.empty((0, 24, 24, 3))
    for index, image_obj in enumerate(images_with_boxes):
        if 'rnet_input' in image_obj:
            bulk_rnet_input = np.append(bulk_rnet_input, image_obj['rnet_input'], axis=0)
    out = rnet(bulk_rnet_input)
    out0 = np.transpose(out[0])
    out1 = np.transpose(out[1])
    score = out1[1, :]
    i = 0
    for index, image_obj in enumerate(images_with_boxes):
        if 'rnet_input' not in image_obj:
            continue
        rnet_input_count = image_obj['rnet_input'].shape[0]
        score_per_image = score[i:i + rnet_input_count]
        out0_per_image = out0[:, i:i + rnet_input_count]
        ipass = np.where(score_per_image > threshold[1])
        image_obj['total_boxes'] = np.hstack([image_obj['total_boxes'][ipass[0], 0:4].copy(),
                                              np.expand_dims(score_per_image[ipass].copy(), 1)])
        mv = out0_per_image[:, ipass[0]]
        if image_obj['total_boxes'].shape[0] > 0:
            h = images[index].shape[0]
            w = images[index].shape[1]
            pick = nms(image_obj['total_boxes'], 0.7, 'Union')
            image_obj['total_boxes'] = image_obj['total_boxes'][pick, :]
            image_obj['total_boxes'] = bbreg(image_obj['total_boxes'].copy(), np.transpose(mv[:, pick]))
            image_obj['total_boxes'] = rerec(image_obj['total_boxes'].copy())
            numbox = image_obj['total_boxes'].shape[0]
            if numbox > 0:
                tempimg = np.zeros((48, 48, 3, numbox))
                image_obj['total_boxes'] = np.fix(image_obj['total_boxes']).astype(np.int32)
                dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(image_obj['total_boxes'].copy(), w, h)
                for k in range(0, numbox):
                    tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
                    tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = images[index][y[k] - 1:ey[k], x[k] - 1:ex[k], :]
                    if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
                        tempimg[:, :, :, k] = imresample(tmp, (48, 48))
                    else:
                        # NOTE(review): same broken np.empty() error path as above.
                        return np.empty()
                tempimg = (tempimg - 127.5) * 0.0078125
                image_obj['onet_input'] = np.transpose(tempimg, (3, 1, 0, 2))
        i += rnet_input_count
    # # # # # # # # # # # # #
    # third stage - further refinement and facial landmarks positions with onet
    # # # # # # # # # # # # #
    bulk_onet_input = np.empty((0, 48, 48, 3))
    for index, image_obj in enumerate(images_with_boxes):
        if 'onet_input' in image_obj:
            bulk_onet_input = np.append(bulk_onet_input, image_obj['onet_input'], axis=0)
    out = onet(bulk_onet_input)
    out0 = np.transpose(out[0])
    out1 = np.transpose(out[1])
    out2 = np.transpose(out[2])
    score = out2[1, :]
    points = out1
    i = 0
    ret = []
    for index, image_obj in enumerate(images_with_boxes):
        if 'onet_input' not in image_obj:
            ret.append(None)
            continue
        onet_input_count = image_obj['onet_input'].shape[0]
        out0_per_image = out0[:, i:i + onet_input_count]
        score_per_image = score[i:i + onet_input_count]
        points_per_image = points[:, i:i + onet_input_count]
        ipass = np.where(score_per_image > threshold[2])
        points_per_image = points_per_image[:, ipass[0]]
        image_obj['total_boxes'] = np.hstack([image_obj['total_boxes'][ipass[0], 0:4].copy(),
                                              np.expand_dims(score_per_image[ipass].copy(), 1)])
        mv = out0_per_image[:, ipass[0]]
        # Map normalized landmark coordinates back to image space.
        w = image_obj['total_boxes'][:, 2] - image_obj['total_boxes'][:, 0] + 1
        h = image_obj['total_boxes'][:, 3] - image_obj['total_boxes'][:, 1] + 1
        points_per_image[0:5, :] = np.tile(w, (5, 1)) * points_per_image[0:5, :] + np.tile(
            image_obj['total_boxes'][:, 0], (5, 1)) - 1
        points_per_image[5:10, :] = np.tile(h, (5, 1)) * points_per_image[5:10, :] + np.tile(
            image_obj['total_boxes'][:, 1], (5, 1)) - 1
        if image_obj['total_boxes'].shape[0] > 0:
            image_obj['total_boxes'] = bbreg(image_obj['total_boxes'].copy(), np.transpose(mv))
            pick = nms(image_obj['total_boxes'].copy(), 0.7, 'Min')
            image_obj['total_boxes'] = image_obj['total_boxes'][pick, :]
            points_per_image = points_per_image[:, pick]
            ret.append((image_obj['total_boxes'], points_per_image))
        else:
            ret.append(None)
        i += onet_input_count
    return ret
# function [boundingbox] = bbreg(boundingbox,reg)
def bbreg(boundingbox, reg):
    """Apply regression offsets `reg` to `boundingbox` in place.

    Each edge is shifted by its offset scaled by the box width/height
    (widths and heights are measured inclusively, hence the +1).
    Returns the same (mutated) array.
    """
    # Some network outputs arrive with two leading singleton axes; squeeze them.
    if reg.shape[1] == 1:
        reg = np.reshape(reg, (reg.shape[2], reg.shape[3]))

    width = boundingbox[:, 2] - boundingbox[:, 0] + 1
    height = boundingbox[:, 3] - boundingbox[:, 1] + 1
    shifted = [
        boundingbox[:, 0] + reg[:, 0] * width,
        boundingbox[:, 1] + reg[:, 1] * height,
        boundingbox[:, 2] + reg[:, 2] * width,
        boundingbox[:, 3] + reg[:, 3] * height,
    ]
    boundingbox[:, 0:4] = np.transpose(np.vstack(shifted))
    return boundingbox
def generateBoundingBox(imap, reg, scale, t):
    """Use a PNet heatmap to generate candidate bounding boxes.

    Parameters
    ----------
    imap : 2-D face-probability heatmap for one pyramid scale.
    reg : (H, W, 4) regression offsets (dx1, dy1, dx2, dy2) per heatmap cell.
    scale : float, the image-pyramid scale this heatmap was produced at.
    t : float, detection probability threshold.

    Returns
    -------
    boundingbox : (N, 9) array of [x1, y1, x2, y2, score, dx1, dy1, dx2, dy2]
        in original-image coordinates.
    reg : (N, 4) array of the matching regression offsets.
    """
    stride = 2     # PNet's effective output stride
    cellsize = 12  # PNet's receptive-field size
    imap = np.transpose(imap)
    dx1 = np.transpose(reg[:, :, 0])
    dy1 = np.transpose(reg[:, :, 1])
    dx2 = np.transpose(reg[:, :, 2])
    dy2 = np.transpose(reg[:, :, 3])
    y, x = np.where(imap >= t)
    if y.shape[0] == 1:
        # Single hit: flip to keep orientation consistent with the multi-hit case.
        dx1 = np.flipud(dx1)
        dy1 = np.flipud(dy1)
        dx2 = np.flipud(dx2)
        dy2 = np.flipud(dy2)
    score = imap[(y, x)]
    reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]]))
    if reg.size == 0:
        # BUG FIX: use 4 columns (one per offset), not 3. The old (0, 3)
        # shape made the hstack below produce a (0, 8) array instead of
        # (0, 9), which breaks np.append(total_boxes, boxes, axis=0)
        # against the (0, 9) accumulator downstream.
        reg = np.empty((0, 4))
    bb = np.transpose(np.vstack([y, x]))
    # Map heatmap cells back to original-image coordinates.
    q1 = np.fix((stride * bb + 1) / scale)
    q2 = np.fix((stride * bb + cellsize - 1 + 1) / scale)
    boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg])
    return boundingbox, reg
# function pick = nms(boxes,threshold,type)
def nms(boxes, threshold, method):
    """Non-maximum suppression.

    Parameters
    ----------
    boxes : (N, 5) array of [x1, y1, x2, y2, score].
    threshold : float, overlap above which a lower-scoring box is suppressed.
    method : str, 'Min' normalises the intersection by the smaller box area;
        any other value uses standard IoU.

    Returns
    -------
    pick : int array of indices of the kept boxes, highest score first
        (or an empty array when `boxes` is empty).
    """
    if boxes.size == 0:
        return np.empty((0, 3))
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    s = boxes[:, 4]
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    I = np.argsort(s)
    pick = np.zeros_like(s, dtype=np.int16)
    counter = 0
    while I.size > 0:
        i = I[-1]  # highest-scoring box still in play
        pick[counter] = i
        counter += 1
        idx = I[0:-1]
        # Intersection of box i with every remaining box.
        xx1 = np.maximum(x1[i], x1[idx])
        yy1 = np.maximum(y1[i], y1[idx])
        xx2 = np.minimum(x2[i], x2[idx])
        yy2 = np.minimum(y2[i], y2[idx])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        # BUG FIX: compare strings with ==, not `is`. Identity comparison of
        # str literals relies on CPython interning (SyntaxWarning on 3.8+)
        # and can silently fall through to the IoU branch.
        if method == 'Min':
            o = inter / np.minimum(area[i], area[idx])
        else:
            o = inter / (area[i] + area[idx] - inter)
        I = I[np.where(o <= threshold)]
    pick = pick[0:counter]
    return pick
# function [dy edy dx edx y ey x ex tmpw tmph] = pad(total_boxes,w,h)
def pad(total_boxes, w, h):
    """Compute padding coordinates (pad the bounding boxes to square).

    For every box, returns the source coordinates clipped to the image
    (x, ex, y, ey), the matching destination offsets inside a tmpw x tmph
    crop buffer (dx, edx, dy, edy), and the crop sizes themselves.
    Coordinates are 1-based, mirroring the original Matlab implementation.
    """
    tmpw = (total_boxes[:, 2] - total_boxes[:, 0] + 1).astype(np.int32)
    tmph = (total_boxes[:, 3] - total_boxes[:, 1] + 1).astype(np.int32)
    numbox = total_boxes.shape[0]

    dx = np.ones((numbox), dtype=np.int32)
    dy = np.ones((numbox), dtype=np.int32)
    edx = tmpw.copy().astype(np.int32)
    edy = tmph.copy().astype(np.int32)

    x = total_boxes[:, 0].copy().astype(np.int32)
    y = total_boxes[:, 1].copy().astype(np.int32)
    ex = total_boxes[:, 2].copy().astype(np.int32)
    ey = total_boxes[:, 3].copy().astype(np.int32)

    # Boxes sticking out on the right/bottom: clip the source edge and pull
    # the destination end inwards by the same amount.
    over_right = np.where(ex > w)
    edx.flat[over_right] = np.expand_dims(-ex[over_right] + w + tmpw[over_right], 1)
    ex[over_right] = w

    over_bottom = np.where(ey > h)
    edy.flat[over_bottom] = np.expand_dims(-ey[over_bottom] + h + tmph[over_bottom], 1)
    ey[over_bottom] = h

    # Boxes sticking out on the left/top: clip the source start to 1 and push
    # the destination start forwards by the same amount.
    over_left = np.where(x < 1)
    dx.flat[over_left] = np.expand_dims(2 - x[over_left], 1)
    x[over_left] = 1

    over_top = np.where(y < 1)
    dy.flat[over_top] = np.expand_dims(2 - y[over_top], 1)
    y[over_top] = 1

    return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph
# function [bboxA] = rerec(bboxA)
def rerec(bboxA):
    """Grow each box in `bboxA` to a square, in place.

    The square's side is the longer of the box's width and height; the box
    centre is preserved. Returns the same (mutated) array.
    """
    heights = bboxA[:, 3] - bboxA[:, 1]
    widths = bboxA[:, 2] - bboxA[:, 0]
    side = np.maximum(widths, heights)
    # Re-centre the top-left corner, then put the bottom-right corner one
    # side-length away.
    bboxA[:, 0] = bboxA[:, 0] + widths * 0.5 - side * 0.5
    bboxA[:, 1] = bboxA[:, 1] + heights * 0.5 - side * 0.5
    bboxA[:, 2:4] = bboxA[:, 0:2] + np.transpose(np.tile(side, (2, 1)))
    return bboxA
def imresample(img, sz):
    """Resize `img` to `sz` = (height, width) with OpenCV area interpolation."""
    # cv2.resize expects (width, height), so swap the (h, w) pair.  #@UndefinedVariable
    return cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA)
# This method is kept for debugging purpose
# h=img.shape[0]
# w=img.shape[1]
# hs, ws = sz
# dx = float(w) / ws
# dy = float(h) / hs
# im_data = np.zeros((hs,ws,3))
# for a1 in range(0,hs):
# for a2 in range(0,ws):
# for a3 in range(0,3):
# im_data[a1,a2,a3] = img[int(floor(a1*dy)),int(floor(a2*dx)),a3]
# return im_data
| 31,714 | 39.556266 | 150 | py |
DeeperForensicsChallengeSolution | DeeperForensicsChallengeSolution-master/model/models.py | import torch
import pretrainedmodels
import torch.nn as nn
from torch.nn import init
import torchvision
from efficientnet_pytorch import EfficientNet
import torch.nn.functional as F
import numpy as np
import math
def get_efficientnet(model_name='efficientnet-b0', num_classes=2, pretrained=True):
    """Build an EfficientNet whose classifier head outputs `num_classes` logits.

    `pretrained` selects ImageNet weights for the backbone; the replaced
    head is always freshly initialised.
    """
    factory = EfficientNet.from_pretrained if pretrained else EfficientNet.from_name
    net = factory(model_name)
    # Swap the final fully-connected layer for one matching our class count.
    net._fc = nn.Linear(in_features=net._fc.in_features, out_features=num_classes, bias=True)
    return net
if __name__ == '__main__':
    # Smoke test: build the default b0 model on CPU and print a torchsummary
    # report plus the module tree.
    model, image_size = get_efficientnet(model_name='efficientnet-b0', num_classes=2, pretrained=True), 224
    model = model.to(torch.device('cpu'))
    from torchsummary import summary
    input_s = (3, image_size, image_size)  # (channels, height, width) expected by summary
    print(summary(model, input_s, device='cpu'))
    print(model._modules.items())
    pass
| 927 | 24.777778 | 107 | py |
DeeperForensicsChallengeSolution | DeeperForensicsChallengeSolution-master/model/toy_predict.py | import sys
sys.path.append('..')
from eval_kit.detector import DeeperForensicsDetector
from model.models import get_efficientnet
import torch
import time
import glob
from PIL import Image
import torchvision.transforms as transforms
from facenet_pytorch import MTCNN, extract_face
import torch.nn as nn
from model.face_detector import DetectionPipeline
from albumentations.pytorch import ToTensorV2
from albumentations import Compose, Normalize, Resize, PadIfNeeded
import cv2
import numpy as np
def get_valid_transforms(size=300):
    """Validation-time albumentations pipeline.

    Resizes to `size` x `size`, pads to at least that size with a constant
    border, applies ImageNet mean/std normalisation and converts to a CHW
    tensor.
    """
    return Compose([
        Resize(height=size, width=size, p=1.0),
        PadIfNeeded(min_height=size, min_width=size, border_mode=cv2.BORDER_CONSTANT),
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),  # ImageNet statistics
        # Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        # Resize(height=224, width=224, p=1.0),
        ToTensorV2(p=1.0),
    ], p=1.0)
# torchvision preprocessing used by the ResNet-style models: resize to
# 224x224, convert to tensor, ImageNet mean/std normalisation.
resnet_default_data_transforms = {
    'test': transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}
class Test_time_agumentation(object):
    """Test-time augmentation helpers for NCHW image tensors.

    Provides clockwise 90/180/270-degree rotations and vertical/horizontal
    flips on the spatial dims ([2, 3]), plus their inverses, so model
    outputs can be mapped back to the original orientation.
    """

    def __init__(self, is_rotation=True):
        self.is_rotation = is_rotation

    def tensor_rotation(self, img):
        """Return [img rotated clockwise by 90, 180, 270 degrees]."""
        quarter = img.rot90(-1, [2, 3])  # -1 = clockwise on (H, W)
        return [quarter, quarter.rot90(-1, [2, 3]), img.rot90(1, [2, 3])]

    def tensor_inverse_rotation(self, img_list):
        """Undo tensor_rotation: rotate each of the three images back."""
        img90, img180, img270 = img_list[0], img_list[1], img_list[2]
        return (
            img90.rot90(1, [2, 3]),  # 1 = counter-clockwise
            img180.rot90(1, [2, 3]).rot90(1, [2, 3]),
            img270.rot90(-1, [2, 3]),
        )

    def tensor_flip(self, img):
        """Return [vertical flip, horizontal flip] of img."""
        return [img.flip(2), img.flip(3)]

    def tensor_inverse_flip(self, img_list):
        """Undo tensor_flip (each flip is its own inverse)."""
        return img_list[0].flip(2), img_list[1].flip(3)
# 9 times
def TTA(model_, img, activation=nn.Softmax(dim=1)):
    """Average `model_`'s activated outputs over 9 test-time augmentations.

    The 9 views are: the original image, its vertical and horizontal flips,
    and the three clockwise rotations of both the original and the
    vertically-flipped image.
    """
    tta = Test_time_agumentation()
    flipped = tta.tensor_flip(img)
    views = [img] + flipped
    for base in (img, flipped[0]):
        views += tta.tensor_rotation(base)
    total = activation(model_(views[0]))
    for view in views[1:]:
        total = total + activation(model_(view))
    return total / len(views)
def preprocess_image(images, device):
    """Preprocess a batch of HWC numpy images for the ResNet-style models.

    Each image goes through the torchvision 'test' transform (resize to
    224x224, ToTensor, ImageNet normalisation); results are concatenated
    into one [N, 3, 224, 224] tensor and moved to `device`.
    """
    transform = resnet_default_data_transforms['test']
    batch = None
    for image in images:
        # Cast to a PIL image for torchvision, then add the batch dimension.
        tensor = transform(Image.fromarray(image)).unsqueeze(0)
        batch = tensor if batch is None else torch.cat([batch, tensor], 0)
    batch = batch.to(device)
    return batch
def preprocess_image_2(images, device):
    """
    Preprocess a batch of images with the albumentations pipeline.

    Runs each HWC numpy image through get_valid_transforms() (resize, pad,
    ImageNet-normalise, to-tensor) and stacks the results into a single
    batch tensor moved to `device`.
    :param images: iterable of numpy images (H, W, C)
    :param device: torch device the batch is moved to
    :return: pytorch tensor of shape [N, 3, size, size]
    """
    # print(images.shape)
    preprocessed_images = None
    # Apply the same albumentations pipeline used at validation time.
    preprocess = get_valid_transforms()
    for image in images:
        sample = {'image': image}
        sample = preprocess(**sample)
        preprocessed_image = sample['image']
        # Add first dimension as the network expects a batch
        preprocessed_image = preprocessed_image.unsqueeze(0)
        if preprocessed_images is None:
            preprocessed_images = preprocessed_image
        else:
            preprocessed_images = torch.cat([preprocessed_images, preprocessed_image], 0)
    preprocessed_images = preprocessed_images.to(device)
    return preprocessed_images
def preprocess_image_3(images, device):
    """Minimal preprocessing: resize to 224x224, scale to [0, 1], HWC->CHW.

    Returns a [N, 3, 224, 224] float tensor on `device`.  Note no mean/std
    normalisation is applied here.
    """
    batch = None
    for image in images:
        resized = cv2.resize(image, dsize=(224, 224)).astype(np.float32)
        resized /= 255
        chw = np.transpose(resized, (2, 0, 1))
        tensor = torch.from_numpy(chw).unsqueeze(0)  # add batch dimension
        batch = tensor if batch is None else torch.cat([batch, tensor], 0)
    batch = batch.to(device)
    return batch
def predict_with_model(image, model, post_function=nn.Softmax(dim=1), device='cpu', is_tta=False):
    """
    Predict labels for a batch of face crops.

    Preprocesses the images (resize/scale only, see preprocess_image_3),
    runs the model in eval mode without gradients and applies
    `post_function` to the logits.
    :param image: batch of numpy images
    :param model: torch model with a linear classification head
    :param post_function: activation applied to the logits, e.g. softmax
    :param device: device the batch is sent to; must match the model's device
    :param is_tta: if True, average outputs over 9 test-time augmentations
    :return: (prediction, output) as numpy arrays; prediction is the argmax
        class per image (1 = fake, 0 = real), output the per-class scores
    """
    # Preprocess
    # preprocessed_image = preprocess_image(image, device)
    # preprocessed_image = preprocess_image_2(image, device)
    preprocessed_image = preprocess_image_3(image, device)
    # Model prediction
    model.eval()
    with torch.no_grad():
        if is_tta:
            # TTA already applies the activation internally.
            output = TTA(model, preprocessed_image, activation=post_function)
        else:
            output = model(preprocessed_image)
            output = post_function(output)
    # Cast to desired
    _, prediction = torch.max(output, 1)  # argmax
    prediction = prediction.cpu().numpy()
    return prediction, output.cpu().numpy()
def clip_pred(val, threshold=0.2):
    """Clamp a probability into [threshold, 1 - threshold].

    Keeping predictions away from 0 and 1 bounds the log-loss penalty for
    confidently wrong answers.
    """
    lower, upper = threshold, 1 - threshold
    if val < lower:
        return lower
    if val > upper:
        return upper
    return val
class ToyPredictor(DeeperForensicsDetector):
    """Deepfake video detector.

    Extracts faces with MTCNN, classifies each face with one or more
    EfficientNet checkpoints, and returns the mean "fake" probability for
    the whole video, clipped away from 0/1.
    """
    def __init__(self):
        super(ToyPredictor, self).__init__()
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        # True: average the b0/b1/b2 ensemble; False: a single b0 model.
        self.is_ensamble = True
        if not self.is_ensamble:
            # model, _, *_ = model_selection('se_resnext101_32x4d', num_out_classes=2, dropout=0.5)
            model = get_efficientnet(model_name='efficientnet-b0', num_classes=2, pretrained=False)
            model_path = './weight/output_my_aug/efn-b0_LS_27_loss_0.2205.pth'
            model.load_state_dict(torch.load(model_path, map_location=self.device))
            print('Load model in:', model_path)
            self.model = model.to(self.device)
        else:
            self.models = self.load_models(model_names=['efficientnet-b0', 'efficientnet-b1', 'efficientnet-b2'],
                                           model_paths=['./weight/output_my_aug/efn-b0_LS_27_loss_0.2205.pth',
                                                        './weight/output_my_aug/efn-b1_LS_6_loss_0.1756.pth',
                                                        './weight/output_my_aug/efn-b2_LS_12_loss_0.1728.pth'])
        # Face detector; keep_all=True returns every detected face per frame.
        self.mtcnn = MTCNN(margin=14, keep_all=True, factor=0.6, device=self.device).eval()

    def load_models(self, model_names, model_paths):
        """Load each named EfficientNet checkpoint onto self.device."""
        models = []
        for i in range(len(model_names)):
            model = get_efficientnet(model_name=model_names[i], num_classes=2, pretrained=False)
            model_path = model_paths[i]
            model.load_state_dict(torch.load(model_path, map_location=self.device))
            print('Load model ', i, 'in:', model_path)
            model.to(self.device)
            models.append(model)
        return models

    def predict(self, video_frames):
        """Return the fake probability for a sequence of video frames."""
        # Here, we just simply return possibility of 0.5
        # Define face detection pipeline
        detection_pipeline = DetectionPipeline(detector=self.mtcnn)
        # Load frames and find faces
        faces = detection_pipeline(video_frames)
        if faces.shape == (0,):  # skip videos where no face was extracted
            print('No face detect in video!')
            pred = 0.5
        else:
            if not self.is_ensamble:
                prediction, output = predict_with_model(faces, self.model, device=self.device, is_tta=False)
                pred = output[:, 1]  # probability of the "fake" class
                pred = sum(pred) / len(pred)
                pred = clip_pred(pred, threshold=0.01)
            else:
                # Average the per-video probability over all ensemble members.
                pred = []
                for model in self.models:
                    prediction, output = predict_with_model(faces, model, device=self.device, is_tta=False)
                    pred_i = output[:, 1]
                    pred_i = sum(pred_i) / len(pred_i)
                    pred.append(pred_i)
                pred = sum(pred) / len(pred)
                pred = clip_pred(pred, threshold=0.01)
        return pred
| 11,159 | 34.884244 | 113 | py |
AAAI-23.6040 | AAAI-23.6040-master/scripts/prediction_stepmania.py | import argparse
import json
import logging
import tempfile
from ast import literal_eval
from logging import getLogger
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pandas as pd
import torch
from notes_generator.constants import ConvStackType, NMELS
from notes_generator.models.onsets import SimpleOnsets
from notes_generator.prediction.predictor import Predictor, SMPredictorDDC
from notes_generator.prediction.step_mania.midi import (
create_from_dataframe as create_midi_stepmania,
)
from notes_generator.preprocessing import mel
logger = getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
def preprocess_audio(
    audio_path,
    live_id,
    bpm_info,
):
    """Convert an audio file to a mel spectrogram plus its metadata.

    Runs the project's mel preprocessing into a temporary directory and
    loads the resulting mel.npz / meta.json before the directory is removed.
    Returns (mel_array, meta_dict).
    """
    with tempfile.TemporaryDirectory() as tempdir:
        mel.convert(
            audio_path.name,
            live_id,
            audio_path.parent,
            Path(tempdir),
            aug_count=0,  # no augmentation at inference time
            bpm_info=bpm_info,
        )
        mel_data, mel_meta = _load_mel(Path(tempdir) / "mel.npz")
    return mel_data, mel_meta
def prediction_main(
    onset_model_path: Path,
    audio_path: Path,
    live_id: int,
    midi_save_path: Path,
    bpm_info: List[Tuple],
    device: str = "cpu",
    inference_chunk_length: int = 640,
):
    """Run onset prediction on one audio file and save the chart as midi.

    Converts the audio to a mel spectrogram, loads the onset model weights
    and delegates to predict_and_save.  The hyper-parameters hard-coded
    below must match the ones the checkpoint was trained with.
    """
    mel_data, mel_meta = preprocess_audio(audio_path, live_id, bpm_info)
    onset_model = SimpleOnsets(
        NMELS,
        1,
        enable_condition=True,
        enable_beats=True,
        conv_stack_type=ConvStackType.v7,
        num_layers=2,
        onset_weight=64,
        dropout=0.5,
        inference_chunk_length=inference_chunk_length,
    )
    onset_model.load_state_dict(
        torch.load(str(onset_model_path), map_location=torch.device(device))
    )
    sym_model = None  # the DDC predictor runs without a symbolic model here
    ddc_predictor = SMPredictorDDC(onset_model, sym_model, device)
    midi_path = midi_save_path / f"{live_id}.mid"
    logger.info("prediction start")
    predict_and_save(ddc_predictor, mel_data, mel_meta, live_id, midi_path)
    return
def predict_and_save(
    predictor: Predictor,
    mel: np.ndarray,
    meta_data: Dict,
    live_id: int,
    midi_save_path: Path,
):
    """Predict notes for every difficulty and write them out as one midi file.

    Each predicted note is tagged with a live_notes_id derived from the
    live_id and the first digit of the difficulty, then all notes are
    converted to midi via create_midi_stepmania.
    """
    bpm_info = meta_data["bpm_info"]
    scores_dict, probs = predictor.predict_all(mel, bpm_info)
    logger.info("prediction complete")
    # add live_notes_id to all songs (an unique ID for live_id and difficulty pair)
    scores = []
    for difficulty in sorted(predictor.difficulties):
        # ex: live_id=1001, difficulty=20 -> live_diff_id = 10012
        live_diff_id = str(live_id) + str(difficulty)[0]
        for note in scores_dict[difficulty]:
            note["live_notes_id"] = live_diff_id
        scores += scores_dict[difficulty]
    df_notes = pd.DataFrame.from_records(scores)
    # Keep only the columns the midi writer consumes.
    df_notes = df_notes[
        ["live_notes_id", "tap_time", "track_index", "is_long_head", "is_long_tail"]
    ]
    midi = create_midi_stepmania(df_notes, bpm_info, live_id)
    logger.info(f"midi save to {str(midi_save_path)}")
    # create midi
    midi.save(str(midi_save_path))
def _load_mel(mel_path: Path):
    """Load a mel spectrogram (.npz) and its sibling meta.json.

    Returns (mel_array, meta_dict).
    """
    with mel_path.open("rb") as npz_file:
        archive = np.load(npz_file)
        mel_array = archive["mel"]
    with (mel_path.parent / "meta.json").open() as meta_file:
        meta = json.load(meta_file)
    return mel_array, meta
if __name__ == "__main__":
    # CLI entry point: predict onsets for one audio file and write a midi.
    parser = argparse.ArgumentParser()
    parser.add_argument("--onset_model_path", type=Path, required=True)
    parser.add_argument("--audio_path", type=Path, required=True)
    parser.add_argument("--midi_save_path", type=Path)
    parser.add_argument("--bpm_info", type=str)
    parser.add_argument("--inference_chunk_length", type=int, default=640)
    args = parser.parse_args()
    onset_model_path = Path(args.onset_model_path)
    assert onset_model_path.exists()
    audio_path = Path(args.audio_path)
    assert audio_path.exists()
    midi_save_path = Path(args.midi_save_path)
    if not midi_save_path.exists():
        midi_save_path.mkdir()
    # --bpm_info is a Python literal string of tuples; the exact tuple layout
    # is consumed by mel.convert — TODO confirm against its documentation.
    bpm_info = literal_eval(args.bpm_info)
    prediction_main(
        onset_model_path,
        audio_path,
        live_id=0,
        midi_save_path=midi_save_path,
        bpm_info=bpm_info,
        inference_chunk_length=args.inference_chunk_length,
    )
| 4,249 | 28.929577 | 84 | py |
AAAI-23.6040 | AAAI-23.6040-master/scripts/model_test.py | import argparse
import os
from datetime import datetime
from pathlib import Path
from torch.utils.data.dataloader import DataLoader
from notes_generator.constants import *
from notes_generator.models.onsets import SimpleOnsets
from notes_generator.training.evaluate import evaluate_test
from notes_generator.training.loader import OnsetTestDataset
from notes_generator.training.model_tester import (
LoaderConfig,
ModelConfig,
ModelTester,
load_local_models,
)
class OnsetLoaderConfig(LoaderConfig):
    """Configuration for building onset-model test data loaders."""
    score_base_path: Path  # directory with onset score data
    audio_base_path: Path  # directory with mel spectrograms
    live_ids: List[int]  # song ids to evaluate
    with_beats: bool  # whether beat arrays are included in the model input
    app_name: AppName
class OnsetModelConfig(ModelConfig):
    """Hyper-parameters used to reconstruct SimpleOnsets for testing."""
    input_features: int
    output_features: int
    num_layers: int  # number of recurrent layers
    enable_condition: bool
    enable_beats: bool
    inference_chunk_length: int  # chunk size (frames) used at inference
    onset_weight: int
    conv_stack_type: ConvStackType
class OnsetModelTester(ModelTester):
    """ModelTester specialisation for onset-detection models."""

    def __init__(self):
        super(OnsetModelTester, self).__init__()

    def _evaluate(self, model, loaders, difficulties, device_name):
        # Delegate to the shared onset evaluation routine.
        return evaluate_test(model, loaders, difficulties, device_name)

    def _get_test_loader(self, loader_config: LoaderConfig, difficulty: int, batch_size: int):
        # Non-shuffled loader over the test split for a single difficulty.
        dataset = OnsetTestDataset(diff_type=difficulty, **loader_config._asdict())
        return DataLoader(dataset, batch_size, shuffle=False)

    def _get_test_model(self, model_config: ModelConfig, device: str):
        # Rebuild the model architecture from the recorded config.
        return SimpleOnsets(**model_config._asdict()).to(device)

    def _load_models(self, model_dir: str):
        """Collect checkpoint files from <model_dir>/checkpoint."""
        # Saving pattern for local files
        local_model_dir = Path(model_dir) / "checkpoint"
        if local_model_dir.exists():
            model_list, model_source = load_local_models(local_model_dir, "model")
        else:
            raise FileNotFoundError(f"model directory doesn't exist: {local_model_dir}")
        return model_list, model_source
def main():
    """CLI entry point: configure and run the onset model test task."""
    # Path & parameter settings
    root = Path(Path(__file__).parent / "..").resolve()
    # Prefer the batch-scheduler job id as run identifier, else a timestamp.
    job_id = os.getenv("PJM_JOBID")
    if job_id is None:
        identifier = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    else:
        identifier = job_id
    parser = argparse.ArgumentParser()
    parser.add_argument("model_dir")
    parser.add_argument("--app_name", type=AppName, default=AppName.STEPMANIA)
    parser.add_argument("--score_dir", type=str)
    parser.add_argument("--mel_dir", type=str)
    parser.add_argument("--seq_length", type=int, default=20480)
    parser.add_argument("--batch", type=int, default=1)
    parser.add_argument("--num_layers", type=int, default=2)
    parser.add_argument("--onset_weight", type=int, default=64)
    parser.add_argument("--with_beats", type=int, default=1)
    parser.add_argument(
        "--conv_stack_type", type=ConvStackType, default=ConvStackType.v1, choices=ConvStackType
    )
    parser.add_argument("--csv_save_dir", default=str(os.getcwd()))
    parser.add_argument("--experiment_name", default=os.getenv("MLFLOW_EXPERIMENT_NAME"))
    args = parser.parse_args()
    model_dir = args.model_dir
    app_name = args.app_name
    score_dir = Path(args.score_dir)
    mel_dir = Path(args.mel_dir)
    seq_length = args.seq_length
    batch_size = args.batch
    csv_file_path = str(os.path.join(args.csv_save_dir, identifier + "-result.csv"))
    experiment_name = args.experiment_name
    num_layers = args.num_layers
    onset_weight = args.onset_weight
    # NOTE(review): yields int 1 rather than True; downstream only checks truthiness.
    with_beats = False if args.with_beats == 0 else 1
    conv_stack_type = args.conv_stack_type
    # One inference chunk covers seq_length audio samples' worth of frames.
    inference_chunk_length = seq_length // FRAME
    model_config = OnsetModelConfig(
        input_features=NMELS,
        output_features=1,
        num_layers=num_layers,
        enable_condition=True,
        enable_beats=with_beats,
        inference_chunk_length=inference_chunk_length,
        onset_weight=onset_weight,
        conv_stack_type=conv_stack_type,
    )
    loader_config = OnsetLoaderConfig(
        score_base_path=score_dir,
        audio_base_path=mel_dir,
        live_ids=default_test_ids[app_name],
        with_beats=with_beats,
        app_name=app_name,
    )
    evaluator = OnsetModelTester()
    evaluator.evaluate(
        model_dir=model_dir,
        model_config=model_config,
        loader_config=loader_config,
        app_name=app_name,
        batch_size=batch_size,
        csv_file_path=csv_file_path,
        experiment_name=experiment_name,
    )
if __name__ == "__main__":
    """Tool for test models.
    This script runs test task for a specific run, given a directory containing steps, the script will test all models
    and save result to both mlflow tracking server and local csv file.
    Example
    -------
    $ python3 scripts/model_test.py data/onset_models
    Attributes
    ----------
    model_dir : str
        The path of the models to be tested.
        If <model_dir>/checkpoint exists, we assume its from local storage, and the 'checkpoint' sub-directory is
        automatically added.
    app_name : str
        The name of the game
    mel_dir : str
        The path of the mel-spectrogram data.
    score_dir : str
        The path of the onset data.
    seq_length : int
        The sequence for the LSTM layer
    batch : int
        The minibatch size of the data.
    num_layers : int
        The number of hidden layers for the LSTM layer.
    onset_weight : int
    with_beats : int
        Specify `1` or `0`.
        If `1`, the beat array will be included to the input of the model.
    conv_stack_type : str
        The type of ConvStack layer.
    csv_save_dir : str
        The base directory of saving path of the output csv file.
    experiment_name : str
        The experiment name on mlflow tracking server.
    """
    # All CLI arguments listed above are parsed inside main().
    main()
| 5,789 | 30.639344 | 118 | py |
AAAI-23.6040 | AAAI-23.6040-master/scripts/onsets_train.py | import argparse
import logging
from collections import OrderedDict
from datetime import datetime
from pathlib import Path
import mlflow
import torch
from torch.optim.lr_scheduler import CosineAnnealingLR, CyclicLR
from torch.utils.data.dataloader import DataLoader
from torch.utils.tensorboard import SummaryWriter
from notes_generator.constants import *
from notes_generator.models.onsets import SimpleOnsets
from notes_generator.models.util import MyDataParallel
from notes_generator.training import train
from notes_generator.training.mlflow import MlflowRunner
from notes_generator.training.loader import OnsetLoader, \
get_difficulty_type_enum
logger = logging.getLogger(__name__)
logging.basicConfig(
level=logging.INFO, format="%(asctime)s %(levelname)s %(name)s %(lineno)d:%(message)s"
)
def load_pretrain_model(model, state_dict_path: str, device: str):
    """Initialise `model` from a pretrained checkpoint, skipping the head.

    Keys belonging to the onset head ("onset_linear*", "onset_sequence*")
    are dropped so only the shared feature layers are transferred;
    strict=False tolerates the resulting missing keys.
    """
    logger.info(f"loading pretrained model from {state_dict_path}")
    pretrained = torch.load(state_dict_path, map_location=device)
    excluded_prefixes = ("onset_linear", "onset_sequence")
    filtered = OrderedDict(
        (key, value)
        for key, value in pretrained.items()
        if not any(key.startswith(prefix) for prefix in excluded_prefixes)
    )
    model.load_state_dict(filtered, strict=False)
    logger.info(f"successfully loaded pretrained model.")
    return
def parse_int_bool(param_name: str, value: int) -> bool:
    """Map an integer CLI flag to a bool (0 -> False, 1 -> True).

    Raises ValueError for any other value, naming the offending parameter.
    """
    if value not in (0, 1):
        raise ValueError(f"Parameter {param_name} must be either 0 or 1.")
    return bool(value)
def parse_args(parser=None):
    """Define and parse the command-line arguments for onset training.

    An existing ArgumentParser may be passed in so callers can pre-register
    their own options before the training options are added.
    """
    parser = argparse.ArgumentParser() if parser is None else parser
    parser.add_argument("--app_name", type=AppName, default=AppName.STEPMANIA)
    parser.add_argument("--model_dir", type=str)
    parser.add_argument("--score_dir", type=str)
    parser.add_argument("--mel_dir", type=str)
    parser.add_argument("--resume", type=int, default=0)
    parser.add_argument("--epochs", type=int, default=200)
    parser.add_argument("--batch", type=int, default=2)
    parser.add_argument("--lr_start", type=float, default=5e-7)
    parser.add_argument("--lr_end", type=float, default=5e-6)
    parser.add_argument("--seq_length", type=int, default=1600)
    parser.add_argument("--aug_count", type=int, default=0)
    parser.add_argument("--num_layers", type=int, default=2)
    parser.add_argument("--onset_weight", type=int, default=64)
    parser.add_argument("--dropout", type=float, default=0.5)
    parser.add_argument("--fuzzy_width", type=int, default=1)  # >1 enables fuzzy labels
    parser.add_argument("--fuzzy_scale", type=float, default=1.0)
    parser.add_argument("--with_beats", type=int, default=1)  # 0/1 flag
    parser.add_argument("--difficulties", type=str, default="")
    parser.add_argument("--send_model", type=float, default=0)
    parser.add_argument("--n_saved_model", type=int, default=40)
    parser.add_argument("--augmentation_setting", type=str, default=None)
    parser.add_argument("--warmup_steps", type=int, default=0)
    parser.add_argument("--weight_decay", type=float, default=0)
    parser.add_argument("--is_parallel", type=int, default=0)
    parser.add_argument(
        "--lr_scheduler", type=str, default="CyclicLR", choices=("CyclicLR", "CosineAnnealingLR")
    )
    parser.add_argument("--eta_min", type=float, default=1e-6)
    parser.add_argument(
        "--conv_stack_type", type=ConvStackType, default=ConvStackType.v1, choices=ConvStackType
    )
    parser.add_argument("--rnn_dropout", type=float, default=0)
    parser.add_argument("--pretrained_model_path", default=None)
    parser.add_argument("--disable_mlflow", action="store_true")
    args = parser.parse_args()
    return args
def main(args=None):
    """Train an onset-detection model from parsed CLI arguments.

    Resolves data/model directories, builds the loaders, the SimpleOnsets
    model, optimizer and LR scheduler, then runs the ignite training loop.
    """
    if args is None:
        args = parse_args()
    root = Path(Path(__file__).parent / "..").resolve()
    # Per-app fallbacks used when the corresponding CLI path is omitted.
    model_dir_default = {
        AppName.STEPMANIA_F: root / "data/step_mania/model",
        AppName.STEPMANIA_I: root / "data/step_mania/model",
        AppName.STEPMANIA: root / "data/step_mania/model",
    }
    score_dir_default = {
        AppName.STEPMANIA_F: root / "data/step_mania/score_onsets_1",
        AppName.STEPMANIA_I: root / "data/step_mania/score_onsets_1",
        AppName.STEPMANIA: root / "data/step_mania/score_onsets_1",
    }
    mel_dir_default = {
        AppName.STEPMANIA_F: root / "data/step_mania/mel_log",
        AppName.STEPMANIA_I: root / "data/step_mania/mel_log",
        AppName.STEPMANIA: root / "data/step_mania/mel_log",
    }
    app_name = args.app_name
    model_dir = model_dir_default[app_name] if args.model_dir is None else Path(args.model_dir)
    score_dir = score_dir_default[app_name] if args.score_dir is None else Path(args.score_dir)
    mel_dir = mel_dir_default[app_name] if args.mel_dir is None else Path(args.mel_dir)
    diff_type = get_difficulty_type_enum(app_name)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    batch_size = args.batch
    if args.n_saved_model < 1:
        raise ValueError("n_saved_model should be greater than 0")
    send_model = parse_int_bool("send_model", args.send_model)
    with_beats = parse_int_bool("with_beats", args.with_beats)
    # BUG FIX: this condition was inverted. --difficulties defaults to "",
    # which previously fell into the literal_eval("") branch and raised
    # SyntaxError, while any user-supplied value was ignored in favour of
    # "all difficulties". Empty now means "all"; anything else is parsed
    # as a Python literal.
    if args.difficulties == "":
        difficulties = tuple(d.value for d in diff_type)
    else:
        from ast import literal_eval
        difficulties = literal_eval(args.difficulties)
    if args.augmentation_setting:
        mlflow.log_artifact(args.augmentation_setting)
    onset_loader = OnsetLoader(
        score_base_path=score_dir,
        audio_base_path=mel_dir,
        seq_length=args.seq_length,
        skip_step=1000,
        aug_count=args.aug_count,
        diff_types=difficulties,
        with_other_condition=False,
        with_beats=with_beats,
        distinguish_downbeats=distinguish_downbeats,
        app_name=app_name,
        augmentation_setting=args.augmentation_setting,
    )
    model = SimpleOnsets(
        input_features=NMELS,
        output_features=1,
        num_layers=args.num_layers,
        enable_condition=True,
        enable_beats=with_beats,
        dropout=args.dropout,
        onset_weight=args.onset_weight,
        inference_chunk_length=args.seq_length // FRAME,
        conv_stack_type=args.conv_stack_type,
        rnn_dropout=args.rnn_dropout,
    ).to(device)
    if args.pretrained_model_path:
        load_pretrain_model(model, args.pretrained_model_path, device)
    if args.is_parallel == 1:
        model = MyDataParallel(model)
    starttime = datetime.now()
    logger.info(f"train start: {device} {starttime}")
    train_dataset = onset_loader.dataset("train", is_shuffle=True)
    train_loader = DataLoader(train_dataset, batch_size, shuffle=False, drop_last=True)
    valid_dataset = onset_loader.dataset("validation", is_shuffle=True)
    valid_loader = DataLoader(valid_dataset, batch_size, shuffle=False, drop_last=True)
    optimizer = torch.optim.Adam(model.parameters(), args.lr_start, weight_decay=args.weight_decay)
    if args.lr_scheduler == "CyclicLR":
        lr_scheduler = CyclicLR(optimizer, args.lr_start, args.lr_end, 1000, cycle_momentum=False)
    elif args.lr_scheduler == "CosineAnnealingLR":
        lr_scheduler = CosineAnnealingLR(optimizer, args.epochs * 100, eta_min=args.eta_min)
    else:
        raise ValueError
    writer = SummaryWriter(model_dir)
    model_dir = Path(model_dir)
    # Remove stale tensorboard event files left over from previous runs.
    for f in model_dir.glob("events.*"):
        f.unlink()
    train.train_ignite(
        args.epochs,
        model,
        model_dir,
        batch_size,
        train_loader,
        valid_loader,
        optimizer,
        lr_scheduler,
        writer,
        device,
        onset_only=True,
        fuzzy_width=args.fuzzy_width,  # if >1 fuzzy label is enabled
        fuzzy_scale=args.fuzzy_scale,
        resume_checkpoint=args.resume,
        lr_find=False,
        warmup_steps=args.warmup_steps,
        send_model=send_model,
        n_saved_model=args.n_saved_model,
        eval_tolerance=eval_tolerance,
        disable_mlflow=args.disable_mlflow,
    )
    endtime = datetime.now()
    logger.info(f"train complete: {endtime - starttime}")
if __name__ == "__main__":
    # Wrap the run in an mlflow context so params/metrics are tracked.
    with MlflowRunner(parse_args) as mr:
        main(mr.args)
| 8,247 | 39.431373 | 99 | py |
AAAI-23.6040 | AAAI-23.6040-master/notes_generator/training/evaluate.py | import sys
from collections import defaultdict
from typing import List, Type
import numpy as np
import torch
from mir_eval.onset import f_measure as evaluate_onset
from mir_eval.transcription import match_notes, precision_recall_f1_overlap as evaluate_notes
from mir_eval.util import midi_to_hz
from notes_generator.constants import *
from notes_generator.models.onsets import OnsetsBase
eps = sys.float_info.epsilon  # smallest float increment; used to guard divisions
MIN_MIDI = 21  # MIDI note number of A0, the lowest piano key
def extract_notes(onsets, frames, onset_threshold=0.5, frame_threshold=0.5):
    """
    Find note timings from onset and frame activation maps.

    Parameters
    ----------
    onsets: torch.FloatTensor, shape = [frames, bins]
    frames: torch.FloatTensor, shape = [frames, bins]
    onset_threshold: float
    frame_threshold: float

    Returns
    -------
    pitches: np.ndarray of bin_indices
    intervals: np.ndarray of rows containing (onset_index, offset_index)
    """
    onsets = (onsets > onset_threshold).cpu().to(torch.uint8)
    frames = (frames > frame_threshold).cpu().to(torch.uint8)
    # A note starts wherever the binarised onset activation rises from 0 to 1.
    onset_diff = torch.cat([onsets[:1, :], onsets[1:, :] - onsets[:-1, :]], dim=0) == 1

    pitches, intervals = [], []
    n_frames = onsets.shape[0]
    for start in onset_diff.nonzero():
        begin, pitch = start[0].item(), start[1].item()
        # Extend the note while either the onset or the frame map stays active.
        end = begin
        while onsets[end, pitch].item() or frames[end, pitch].item():
            end += 1
            if end == n_frames:
                break
        if end > begin:
            pitches.append(pitch)
            intervals.append([begin, end])

    return np.array(pitches), np.array(intervals)
def notes_to_frames(pitches, intervals, shape):
    """
    Takes lists specifying notes sequences and return

    Parameters
    ----------
    pitches: list of pitch bin indices
    intervals: list of [onset, offset] ranges of bin indices
    shape: the shape of the original piano roll, [n_frames, n_bins]

    Returns
    -------
    time: np.ndarray containing the frame indices
    freqs: list of np.ndarray, each containing the frequency bin indices
    """
    piano_roll = np.zeros(tuple(shape))
    # Paint every note interval onto the roll (offset exclusive).
    for pitch, (start, stop) in zip(pitches, intervals):
        piano_roll[start:stop, pitch] = 1
    time = np.arange(piano_roll.shape[0])
    freqs = []
    for t in time:
        freqs.append(piano_roll[t, :].nonzero()[0])
    return time, freqs
def to_onehot_score(score_l, score_r, device):
    """One-hot encode left/right lane labels (values 0..7), drop the
    "no note" class 0, and concatenate to a 14-dim vector per frame."""
    eye = torch.eye(8, device=device).long()
    onehot_l = eye[score_l][:, :, 1:]
    onehot_r = eye[score_r][:, :, 1:]
    return torch.cat([onehot_l, onehot_r], dim=-1)
def prepare_mireval(target, pred):
    """Prepare features in a form that can be used in mir_eval.

    Parameters
    ----------
    target : torch.Tensor
    pred : torch.Tensor

    Returns
    -------
    i_ref : np.ndarray
        The array containing (onset_sec, offset_sec) rows of the target.
    p_ref : np.ndarray
        The array containing reference pitches in Hz.
    i_est : np.ndarray
        The array of (onset_sec, offset_sec) rows of the prediction.
    p_est : np.ndarray
        The array containing estimated pitches in Hz.
    """
    ref_pitches, ref_intervals = extract_notes(target, target)
    est_pitches, est_intervals = extract_notes(pred, pred)
    # Convert frame indices to seconds.
    sec_per_frame = HOP_LENGTH / SAMPLE_RATE
    i_ref = (ref_intervals * sec_per_frame).reshape(-1, 2)
    i_est = (est_intervals * sec_per_frame).reshape(-1, 2)
    # Convert pitch bin indices to frequencies in Hz.
    p_ref = np.array([midi_to_hz(MIN_MIDI + p) for p in ref_pitches])
    p_est = np.array([midi_to_hz(MIN_MIDI + p) for p in est_pitches])
    return i_ref, p_ref, i_est, p_est
def evaluate(model, loader, device, tolerance=0.05):
    """Compute note- and onset-level precision/recall/F1 over a data loader.

    Parameters
    ----------
    model
        Model exposing ``predict(batch)``.
    loader
        Iterable of batches; each batch is a dict with at least an "onset" key.
    device
        Torch device the targets are moved to.
    tolerance : float
        Onset matching tolerance in seconds, forwarded to mir_eval.

    Returns
    -------
    metrics : Dict[str, List[float]]
        Per-sample metric lists keyed "metric/note/..." and "metric/onset/...".
    """
    metrics = defaultdict(list)
    for batch in loader:
        pred = model.predict(batch).long()
        target = batch["onset"].long().to(device)
        # Two-lane output (last dim > 1): expand lane labels to one-hot form.
        if pred.shape[-1] > 1:
            target = to_onehot_score(target[:, :, 0], target[:, :, 1], device)
            pred = to_onehot_score(pred[:, :, 0], pred[:, :, 1], device)
        batch_size = batch["onset"].size(0)
        for b in range(batch_size):
            i_ref, p_ref, i_est, p_est = prepare_mireval(target[b], pred[b])
            # Note-level metrics; offset_ratio=None means onset-only matching.
            p, r, f, o = evaluate_notes(
                i_ref, p_ref, i_est, p_est, offset_ratio=None, onset_tolerance=tolerance
            )
            metrics["metric/note/precision"].append(p)
            metrics["metric/note/recall"].append(r)
            metrics["metric/note/f1"].append(f)
            metrics["metric/note/overlap"].append(o)
            # Onset-level metrics ignore pitch: compare onset times only.
            onset_ref = np.array([o[0] for o in i_ref])
            onset_est = np.array([o[0] for o in i_est])
            f_o, p_o, r_o = evaluate_onset(onset_ref, onset_est, tolerance)
            metrics["metric/onset/precision"].append(p_o)
            metrics["metric/onset/recall"].append(r_o)
            metrics["metric/onset/f1"].append(f_o)
    return metrics
def prec_rec_f1(tp, fp, fn):
    """Compute precision, recall and F1 from TP/FP/FN counts.

    Each metric falls back to 0 when its denominator is zero.
    """
    precision = tp / (tp + fp) if tp + fp else 0
    recall = tp / (tp + fn) if tp + fn else 0
    denom = 2 * tp + fp + fn
    f1 = 2 * tp / denom if denom else 0
    return precision, recall, f1
def predict_test(model: Type[OnsetsBase], loader, result_dict, device):
    """Run prediction over a loader and collect per-(song, difficulty) results.

    Parameters
    ----------
    model : Type[OnsetsBase]
        Model exposing ``predict_with_probs(batch)``.
    loader
        Iterable of batches carrying "onset", "live_id" and "condition" keys.
    result_dict : Dict
        Nested dict {live_id: {difficulty: {"proba", "target"}}}; updated in place.
    device
        Torch device the targets are moved to.

    Returns
    -------
    result_dict : Dict
        The same dict with one entry per (live_id, difficulty).
    """
    for batch in loader:
        _, proba = model.predict_with_probs(batch)
        target = batch["onset"].long().to(device)
        live_ids = batch["live_id"]
        difficulties = batch["condition"]
        batch_size = batch["onset"].size(0)
        # Assume that `b` contains data of a whole song, so one entry per song.
        for b in range(batch_size):
            live_id = int(live_ids[b])
            # condition is constant over the sequence; take the first frame.
            diff = int(difficulties[b, 0])
            result_dict[live_id][diff] = {
                "proba": proba[b],
                "target": target[b],
            }
    return result_dict
def micro_metrics(
    probs: List[torch.Tensor], targets: List[torch.Tensor], threshold: float, tolerance: float
):
    """Calculate micro averaged metrics.

    Parameters
    ----------
    probs : List[torch.Tensor]
        The list containing model predictions for each chart.
    targets : List[torch.Tensor]
        The list containing ground truth labels for each chart.
    threshold : float
        A frame whose probability reaches this threshold is considered
        to contain a note.
    tolerance : float
        The minimum tolerance for onset matching.

    Returns
    -------
    eval_results : Dict[str, float]
        The dict containing evaluated metrics: {metric_key: metric_value}

    Notes
    -----
    Micro average: sum up TP, FP, FN over all charts, then compute
    precision, recall and F1 once at the end.
    """
    tp_total, fp_total, fn_total = 0, 0, 0
    for prob, tgt in zip(probs, targets):
        binarized = prob >= threshold
        i_ref, p_ref, i_est, p_est = prepare_mireval(tgt, binarized)
        matched = match_notes(
            i_ref, p_ref, i_est, p_est, offset_ratio=None, onset_tolerance=tolerance
        )
        n_match = len(matched)
        tp_total += n_match
        fp_total += len(p_est) - n_match
        fn_total += len(p_ref) - n_match
    p_micro, r_micro, f_micro = prec_rec_f1(tp_total, fp_total, fn_total)
    metrics = defaultdict(list)
    metrics["metric/note/precision.micro"].append(p_micro)
    metrics["metric/note/recall.micro"].append(r_micro)
    metrics["metric/note/f1.micro"].append(f_micro)
    # take the average (single-element lists; keeps the numpy scalar type)
    eval_results = dict()
    for key, vals in metrics.items():
        eval_results[key] = np.array(vals).mean()
    return eval_results
def chart_metrics(
    probs: List[torch.Tensor], targets: List[torch.Tensor], threshold: float, tolerance: float
):
    """Calculate chart averaged metrics.

    Parameters
    ----------
    probs : List[torch.Tensor]
        The list containing model predictions for each chart.
    targets : List[torch.Tensor]
        The list containing ground truth labels for each chart.
    threshold : float
        A frame whose probability reaches this threshold is considered
        to contain a note.
    tolerance : float
        The minimum tolerance for onset matching.

    Returns
    -------
    eval_results : Dict[str, float]
        The dict containing evaluated metrics: {metric_key: metric_value}

    Notes
    -----
    Explanation about chart average:
    Calculate precision, recall, F1 for each chart,
    then take an average of these metrics as final result.

    Fix: binarization now uses ``prob >= threshold`` so that chart metrics
    and ``micro_metrics`` are computed on identical predictions; previously
    this function used a strict ``>`` while ``micro_metrics`` used ``>=``.
    """
    metrics = defaultdict(list)
    for prob, tgt in zip(probs, targets):
        pred = prob >= threshold  # was `>`: keep consistent with micro_metrics
        i_ref, p_ref, i_est, p_est = prepare_mireval(tgt, pred)
        matching = match_notes(
            i_ref, p_ref, i_est, p_est, offset_ratio=None, onset_tolerance=tolerance
        )
        tp = len(matching)
        fp = len(p_est) - len(matching)
        fn = len(p_ref) - len(matching)
        p, r, f = prec_rec_f1(tp, fp, fn)
        metrics["metric/note/precision.chart"].append(p)
        metrics["metric/note/recall.chart"].append(r)
        metrics["metric/note/f1.chart"].append(f)
    # take the average over charts
    eval_results = dict()
    for key, val in metrics.items():
        eval_results[key] = np.array(val).mean()
    return eval_results
def evaluate_test(model, loaders, difficulties, device, tolerance=0.05):
    """Evaluate a model on the test set, per difficulty and overall.

    Parameters
    ----------
    model
        Model exposing ``predict_with_probs``.
    loaders : Dict[int, DataLoader]
        Test loaders keyed by difficulty value.
    difficulties
        Iterable of difficulty enum members to evaluate.
    device
        Torch device used for targets.
    tolerance : float
        Onset matching tolerance in seconds.

    Returns
    -------
    eval_dict : Dict[str, Dict[str, float]]
        {difficulty_name: metrics}, plus an "all_diff" entry that aggregates
        every difficulty together.
    """
    # the dict containing prediction results: {live_id: {diff: {res_key: res_value}}}
    pred_results = defaultdict(lambda: defaultdict(dict))
    for diff in difficulties:
        test_loader = loaders[diff.value]
        pred_results = predict_test(model, test_loader, pred_results, device)
    # calculate metrics for each difficulty
    eval_dict = dict()
    threshold = 0.5
    for diff in difficulties:
        probs = [
            pred_results[l_id][diff_]["proba"]
            for l_id, diffs in pred_results.items()
            for diff_ in diffs
            if diff_ == diff.value
        ]
        targets = [
            pred_results[l_id][diff_]["target"]
            for l_id, diffs in pred_results.items()
            for diff_ in diffs
            if diff_ == diff.value
        ]
        eval_results = chart_metrics(probs, targets, threshold, tolerance)
        eval_results.update(micro_metrics(probs, targets, threshold, tolerance))
        eval_dict[diff.name] = eval_results
    # calculate metrics over all difficulties combined
    probs = [
        pred_results[l_id][diff_]["proba"]
        for l_id, diffs in pred_results.items()
        for diff_ in diffs
    ]
    targets = [
        pred_results[l_id][diff_]["target"]
        for l_id, diffs in pred_results.items()
        for diff_ in diffs
    ]
    eval_results = chart_metrics(probs, targets, threshold, tolerance)
    eval_results.update(micro_metrics(probs, targets, threshold, tolerance))
    eval_dict["all_diff"] = eval_results
    return eval_dict
| 11,028 | 33.145511 | 94 | py |
AAAI-23.6040 | AAAI-23.6040-master/notes_generator/training/model_tester.py | import csv
import os
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, TextIO, Type, Union
import mlflow
import torch
import torch.multiprocessing as mp
import yaml
from torch import nn
from torch.utils.data.dataloader import DataLoader
from notes_generator.constants import *
LoaderConfig = NamedTuple
ModelConfig = NamedTuple
EvalDict = Dict[str, Dict[str, Union[int, float]]] # {diff_name: {metric_name: metric_value}}
class ModelInfo(NamedTuple):
    """Lightweight record describing one checkpoint to be evaluated."""
    # human-readable identifier (file stem, or "<run_id>_<artifact_path>" for mlflow)
    model_name: str
    # filesystem path to the .pth checkpoint file
    model_path: str
    # training step parsed from the name (0 when it cannot be determined)
    step: int
def load_local_models(model_dir: Path, prefix: str):
    """Collect locally saved checkpoints matching ``<prefix>*`` in model_dir.

    The trailing ``_<step>`` of each file stem is parsed as the training step.
    Returns the list of ModelInfo plus the source tag "local".
    """
    found = []
    for checkpoint in model_dir.glob(prefix + "*"):
        stem = checkpoint.stem
        found.append(
            ModelInfo(
                model_name=stem,
                model_path=str(checkpoint),
                step=int(stem.split("_")[-1]),
            )
        )
    return found, "local"
def load_mlflow_models(model_dir: str):
    """Scan an mlflow artifact tree for .pth checkpoints.

    For each checkpoint the sibling ``MLmodel`` YAML (one directory up) is
    read to recover the run id and artifact path; the artifact path's numeric
    suffix, if any, becomes the step.  Returns (models, "mlflow").
    """
    found = []
    # Traditional way to search for models in directories
    for dirpath, _dirnames, filenames in os.walk(model_dir):
        for filename in filenames:
            if not filename.endswith(".pth"):
                continue
            mlmodel_path = Path(dirpath).parent / "MLmodel"
            with open(mlmodel_path, "r") as fp:
                info = yaml.safe_load(fp)
            run_id = info["run_id"]
            artifact_path = info["artifact_path"]
            suffix = artifact_path.split("_")[-1]
            step = int(suffix) if suffix.isnumeric() else 0
            found.append(
                ModelInfo(
                    model_name=run_id + "_" + artifact_path,
                    model_path=os.path.join(dirpath, filename),
                    step=step,
                )
            )
    return found, "mlflow"
def _save_result(csv_file: TextIO, model_source: str, result_dict: Dict):
header_written = False
csv_writer = csv.writer(csv_file)
if model_source == "local":
header = ["model_name", "difficulty"]
for key in result_dict:
model_name = result_dict[key]["model_name"]
for difficulty_key in result_dict[key]["eval_result"].keys():
row = [model_name, difficulty_key]
if not header_written:
header.extend(result_dict[key]["eval_result"][difficulty_key].keys())
csv_writer.writerow(header)
header_written = True
row.extend(result_dict[key]["eval_result"][difficulty_key].values())
csv_writer.writerow(row)
# Log metrics one-by-one to mlflow tracking server
for metric_key in result_dict[key]["eval_result"][difficulty_key]:
metric_name = difficulty_key + "/" + metric_key
metric_scalar = result_dict[key]["eval_result"][difficulty_key][metric_key]
print(f"Writing metric: {metric_name}@{model_name}")
mlflow.log_metric(
key=metric_name,
value=metric_scalar,
step=result_dict[key]["step"],
)
elif model_source == "mlflow":
header = ["run_id", "difficulty"]
for key in result_dict:
run_id = result_dict[key]["model_name"].split("_")[0]
for difficulty_key in result_dict[key]["eval_result"].keys():
row = [run_id, difficulty_key]
if not header_written:
header.extend(result_dict[key]["eval_result"][difficulty_key].keys())
csv_writer.writerow(header)
header_written = True
row.extend(result_dict[key]["eval_result"][difficulty_key].values())
csv_writer.writerow(row)
# Log metrics one-by-one to mlflow tracking server
for metric_key in result_dict[key]["eval_result"][difficulty_key]:
metric_name = run_id + "/" + difficulty_key + "/" + metric_key
metric_scalar = result_dict[key]["eval_result"][difficulty_key][metric_key]
print(f"Writing metric: {metric_name}@{key}")
mlflow.log_metric(
key=metric_name,
value=metric_scalar,
step=result_dict[key]["step"],
)
else:
raise NotImplementedError("Invalid model source")
class ModelTester:
    """This class is for performing batch evaluation.

    Usage
    -----
    1. Make a subclass inheriting this class
    2. Override methods below and implement concrete method:
        * _evaluate
        * _get_test_loader
        * _get_test_model
        * _load_models
    3. Run evaluate()
    """

    def _evaluate(
        self,
        model: nn.Module,
        loaders: List[DataLoader],
        difficulties: Type[Enum],
        device_name: str,
    ) -> EvalDict:
        """This method implements concrete evaluation process.

        Parameters
        ----------
        model : nn.Module
            The model which is evaluated.
        loaders : List[DataLoader]
            The loader classes feeding test data.
        difficulties : Type[Enum]
            The Enum class defining difficulty list for each game.
        device_name : str
            Device used for evaluation, should be acceptable by torch.device()

        Returns
        -------
        result_dict : EvalDict
            The dictionary containing evaluation results.
            The structure is below:
            {diff_name: {metric_key: metric_value}}
        """
        raise NotImplementedError

    def _get_test_loader(
        self, loader_config: LoaderConfig, difficulty: int, batch_size: int
    ) -> DataLoader:
        """This method implements how to load a data loader class.

        Parameters
        ----------
        loader_config : LoaderConfig
            The named tuple containing loader configurations
        difficulty : int
            The difficulty level as integer mapping to constant.py
        batch_size : int
            The size of minibatch used in evaluation.

        Returns
        -------
        data_loader : DataLoader
            The data loader class used in evaluation
        """
        raise NotImplementedError

    def _get_test_model(self, model_config: ModelConfig, device: str):
        """This method implements how to load a concrete model with parameters.

        Parameters
        ----------
        model_config : ModelConfig
            The named tuple containing model configurations
        device : str
            Device used for evaluation, should be acceptable by torch.device()

        Returns
        -------
        model : nn.Module
            The model object for the specified model path.
            The model is assumed that either trained parameters have been loaded
            and the model has been sent to specified device.
        """
        raise NotImplementedError

    def _load_models(self, model_dir: str):
        """This method implements how to load models in given directory.

        Parameters
        ----------
        model_dir : str
            The directory to the models to be tested

        Returns
        -------
        Model information : List[ModelInfo]
            A list of ModelInfo containing model's information
        Model source : str
            The source of the model is automatically inferred from the given
            directory: if <model_dir>/checkpoint exists, we assume locally
            saved models; otherwise we assume mlflow output and search for
            files that end with ".pth" and their corresponding "MLmodel" file
            to get the run_id.
        """
        raise NotImplementedError

    def evaluate(
        self,
        model_dir: str,
        model_config: ModelConfig,
        loader_config: LoaderConfig,
        batch_size: int,
        app_name: AppName,
        csv_file_path: str,
        experiment_name: str,
    ):
        """This method runs evaluation and save results

        Parameters
        ----------
        model_dir : str
            The directory to the models to be tested
        model_config : ModelConfig
            The named tuple containing model configurations
        loader_config : LoaderConfig
            The named tuple containing loader configurations
        batch_size : int
            The minibatch size of test data
        app_name : AppName
            The name of the game
        csv_file_path : str
            The path to csv file in which the result will be stored
        experiment_name : str
            The result will be sent to mlflow server with this experiment name
        """
        model_list, model_source = self._load_models(model_dir)
        difficulties = get_difficulty_type_enum(app_name)
        # One loader per difficulty; loaders are shared across all models.
        loaders = {
            d.value: self._get_test_loader(loader_config, d.value, batch_size) for d in difficulties
        }
        device = "cuda" if torch.cuda.is_available() else "cpu"
        if device == "cuda":
            eval_result = self._evaluate_on_gpu(
                model_list,
                model_config,
                loaders,
                app_name,
            )
        else:
            eval_result = self._evaluate_on_cpu(
                model_list,
                model_config,
                loaders,
                app_name,
            )
        self._save_result(
            result_dict=eval_result,
            csv_file_path=csv_file_path,
            experiment_name=experiment_name,
            model_source=model_source,
        )

    def _evaluate_model(
        self,
        model_info: ModelInfo,
        model_config: ModelConfig,
        loaders: Dict[int, DataLoader],
        result_dictionary: Any,
        device_name: str,
        app_name: AppName,
    ):
        """This method runs evaluation on designated device.

        Parameters
        ----------
        model_info : ModelInfo
            The information of the model to be tested (name, path, step)
        model_config : ModelConfig
            The named tuple containing model configurations
        loaders : Dict[int, DataLoader]
            Test data loaders keyed by difficulty value
        result_dictionary : Dict or torch.multiprocessing.Manager.dict()
            The dictionary to save results
        device_name : str
            Device to be used for the test
        app_name : AppName
            The name of the game
        """
        print(f"Evaluating model: {model_info.model_name} on device: {device_name}")
        model = self._get_test_model(model_config, device_name)
        # If trained with data-parallel the checkpoint holds a wrapped module;
        # fall back to unwrapping `.module` when the plain load fails.
        # (was a bare `except:` which also swallowed KeyboardInterrupt/SystemExit)
        try:
            state_dict = torch.load(model_info.model_path, map_location=torch.device(device_name))
            model.load_state_dict(state_dict)
        except Exception:
            model.load_state_dict(
                torch.load(
                    model_info.model_path, map_location=torch.device(device_name)
                ).module.state_dict()
            )
        difficulties = get_difficulty_type_enum(app_name)
        eval_dict = self._evaluate(model, loaders, difficulties, device_name)
        result_dictionary[model_info.model_path] = {
            "eval_result": eval_dict,
            "model_name": model_info.model_name,
            "step": model_info.step,
        }

    def _evaluate_on_cpu(
        self,
        model_list: list,
        model_config: ModelConfig,
        loaders: Dict[int, DataLoader],
        app_name: AppName,
    ) -> Dict:
        """This method runs evaluation task on CPU environment.

        Parameters
        ----------
        model_list : list
            The list of the models to be tested, list of ModelInfo
        model_config : ModelConfig
            The named tuple containing model configurations
        loaders : Dict[int, DataLoader]
            Test data loaders keyed by difficulty value
        app_name : AppName
            The name of the game

        Returns
        -------
        Dict
            Cascaded result dictionary, format:
            {model_name:{difficulty:{metric_name:value}}}
        """
        res_dict = {}
        for model_info in model_list:
            self._evaluate_model(
                model_info=model_info,
                model_config=model_config,
                loaders=loaders,
                result_dictionary=res_dict,
                device_name="cpu",
                app_name=app_name,
            )
        return res_dict

    def _evaluate_on_gpu(
        self,
        model_list: list,
        model_config: ModelConfig,
        loaders: Dict[int, DataLoader],
        app_name: AppName,
    ) -> Dict:
        """This method runs evaluation task on GPU environment.

        Models are distributed over the available GPUs in batches, one
        subprocess per GPU.

        Parameters
        ----------
        model_list : list
            The list of the models to be tested, list of ModelInfo
        model_config : ModelConfig
            The named tuple containing model configurations
        loaders : Dict[int, DataLoader]
            Test data loaders keyed by difficulty value
        app_name : AppName
            The name of the game

        Returns
        -------
        Dict
            Cascaded result dictionary, format:
            {model_name:{difficulty:{metric_name:value}}}
        """
        # Pytorch multiprocess settings: CUDA requires the "spawn" start method.
        mp.set_start_method("spawn")
        manager = mp.Manager()
        res_dict = manager.dict()
        gpu_count = torch.cuda.device_count()
        print(f"Running Parallel Task With Parallel Process: {gpu_count}")
        batch_count = len(model_list) // gpu_count + 1
        for batch_index in range(batch_count):
            process_list = []
            print(f"Processing Batch {batch_index + 1}, total: {batch_count}, GPUs: {gpu_count}")
            # The last batch only holds the remainder of the models.
            if batch_index < batch_count - 1:
                process_num = gpu_count
            else:
                process_num = len(model_list) % gpu_count
            for process_index in range(process_num):
                model_info = model_list[batch_index * gpu_count + process_index]
                assigned_device = f"cuda:{process_index}"
                print(
                    f"Starting process #{process_index}, Assigned device: {assigned_device}, model:{model_info}"
                )
                p = mp.Process(
                    target=self._evaluate_model,
                    args=(
                        model_info,
                        model_config,
                        loaders,
                        res_dict,
                        assigned_device,
                        app_name,
                    ),
                )
                process_list.append(p)
                p.start()
            for p in process_list:
                p.join()
                p.close()
            print("Batch process complete")
        return res_dict.copy()

    def _save_result(
        self,
        result_dict: dict,
        experiment_name: str,
        csv_file_path: str,
        model_source: str,
    ):
        """This method sends result to mlflow tracking server and saves a copy
        to a csv file.

        Parameters
        ----------
        result_dict : Dict
            Result data generated by the evaluation function.
        experiment_name : str
            The mlflow experiment name.
            NOTE(review): currently unused in this method — confirm intent.
        csv_file_path : str
            The csv file to save the result.
        model_source : str
            The source of the model, can be locally saved model or saved
            automatically by mlflow.
        """
        with mlflow.start_run():
            # Optional run tags taken from the environment.
            run_name = os.getenv("MLFLOW_RUN_NAME")
            if run_name:
                mlflow.set_tag("mlflow.runName", run_name)
            data_version = os.getenv("DATA_VERSION")
            if data_version:
                mlflow.set_tag("data.version", data_version)
            with open(csv_file_path, "w") as csv_file:
                _save_result(csv_file, model_source, result_dict)
| 16,385 | 34.777293 | 124 | py |
AAAI-23.6040 | AAAI-23.6040-master/notes_generator/training/augmenation.py | import math
import typing
from pathlib import Path
import numpy as np
import torch
import torch.nn.functional as F
import torchaudio.functional as AF
import torchaudio.transforms as T
import yaml
from notes_generator.constants import FRAME, NMELS
Sample = typing.Dict[str, torch.Tensor]
class AugConfig(typing.NamedTuple):
    """One augmentation entry loaded from the YAML settings file."""
    # name of a module-level augmentation function (resolved via globals())
    function: str
    # probability of applying the augmentation to a given sample
    probability: float
    # extra keyword arguments forwarded to the function (may be None)
    params: typing.Dict[str, typing.Any]
def load_augmentations(file_path: Path) -> typing.List[AugConfig]:
    """Read the YAML augmentation settings and wrap each entry in AugConfig."""
    with file_path.open() as fp:
        entries = yaml.safe_load(fp)
    configs = []
    for entry in entries:
        configs.append(AugConfig(**entry))
    return configs
def apply_augmentation(augmentations: typing.List[AugConfig], data: Sample) -> Sample:
    """Apply each configured augmentation to `data`, each with its own probability."""
    for config in augmentations:
        # Draw once per augmentation; skip when the draw misses.
        if np.random.rand() >= config.probability:
            continue
        kwargs = config.params or dict()
        data = globals()[config.function](data, **kwargs)
    return data
def freq_shift(data: Sample, shift_min: float = -0.1, shift_max: float = 0.1) -> Sample:
    """Circularly shift the spectrogram along the frequency axis by a random
    fraction of the bin count drawn from [shift_min, shift_max]."""
    spec = data["audio"]
    n_freq = spec.shape[1]
    lo = int(n_freq * shift_min)
    hi = int(n_freq * shift_max)
    shift = int(np.random.randint(lo, hi + 1))
    # roll(x, r, dims=1) == x[:, (arange(F) - r) % F]: same circular shift.
    data["audio"] = torch.roll(spec, shifts=shift, dims=1)
    return data
def freq_mask(data: Sample, mask_max: int = 4, mask_count: int = 1) -> Sample:
    """Overwrite one or more random frequency bands with the spectrogram mean.

    Band width is drawn from [1, mask_max] and the number of bands from
    [1, mask_count] (both inclusive).
    """
    spec = data["audio"]
    band = np.random.randint(1, mask_max + 1)  # +1: include mask_max
    n_masks = np.random.randint(1, mask_count + 1)
    fill = torch.mean(spec)
    for _ in range(n_masks):
        start = np.random.randint(spec.shape[1] - band)
        spec[:, start : (start + band)] = fill
    data["audio"] = spec
    return data
def time_mask(data: Sample, mask_max: int = 4, mask_count: int = 1) -> Sample:
    """Overwrite one or more random time spans with the spectrogram mean.

    Span width is drawn from [1, mask_max] and the number of spans from
    [1, mask_count] (both inclusive).

    Bug fix: the span start index was drawn from ``audio.shape[1]`` (the
    frequency-axis size, copy-pasted from freq_mask) although the mask is
    applied along axis 0 (time); spans could therefore never start after
    frame ``shape[1] - 1`` and could be entirely out of range.  The start is
    now drawn from ``audio.shape[0] - width``.
    """
    audio = data["audio"]
    width = np.random.randint(1, mask_max + 1)  # +1: include mask_max
    count = np.random.randint(1, mask_count + 1)
    mean = torch.mean(audio)
    for _ in range(count):
        i = np.random.randint(audio.shape[0] - width)  # was audio.shape[1]
        audio[i : (i + width), :] = mean
    data["audio"] = audio
    return data
def white_noise(data: Sample, sigma: float = 0.03) -> Sample:
    """Add i.i.d. Gaussian noise with standard deviation `sigma` in place."""
    spec = data["audio"]
    noise = sigma * torch.randn(spec.shape, device=spec.device)
    spec.add_(noise)  # in-place: the same tensor object stays in `data`
    data["audio"] = spec
    return data
def freq_low_mask(data: Sample, min_mask_range: int = 10, max_mask_range: int = 19) -> Sample:
    """Replace the last (highest-index) frequency bins with the mean value.

    The number of masked bins is drawn from [min_mask_range, max_mask_range]
    (both inclusive).
    """
    spec = data["audio"]
    fill = torch.mean(spec)
    n_bins = np.random.randint(
        min_mask_range, max_mask_range + 1
    )  # +1: include max_mask_range
    spec[:, -n_bins:] = fill
    data["audio"] = spec
    return data
def freq_flip_audio(data: Sample) -> Sample:
    """Reverse the order of the frequency bins."""
    spec = data["audio"]
    reversed_idx = torch.arange(spec.shape[1] - 1, -1, -1)
    data["audio"] = spec[:, reversed_idx]
    return data
def mask_beats(data: Sample, drop_rate: float = 0.2) -> Sample:
    """Randomly zero out beat pulses via dropout; no-op without a "beats" key."""
    if "beats" not in data:
        return data
    data["beats"] = F.dropout(data["beats"], drop_rate)
    return data
def _audio_stretch(array: torch.Tensor, rate: float):
    """Stretch the mel spectrogram tensor.

    The input is a log-mel spectrogram of shape (timestep, freq) with
    freq == NMELS; it is exponentiated back to linear magnitude, phase-vocoder
    stretched by `rate` via torchaudio's TimeStretch, and returned in log
    scale with shape (timestep', freq).

    NOTE(review): ``AF.complex_norm`` and the trailing (re, im) complex layout
    were deprecated and later removed in newer torchaudio releases — confirm
    the pinned torchaudio version still supports them.
    """
    audio_stretch = T.TimeStretch(n_freq=NMELS)
    array_conv = array.transpose(-2, -1)  # convert shape to (freq, timestep)
    array_conv = torch.exp(array_conv)  # log-mel -> linear magnitude
    # Add a dimension containing 2 elems for representing complex number
    array_conv = array_conv.unsqueeze(-1)
    array_conv = torch.cat([array_conv, torch.zeros(array_conv.shape)], dim=-1)  # imag = 0
    array_conv = audio_stretch(array_conv, rate)  # shape: (freq, timestep, 2)
    array_conv = AF.complex_norm(array_conv).type(array.type())  # shape: (freq, timestep)
    array_conv = array_conv.transpose(-2, -1)  # convert shape to (timestep, freq)
    array_conv = torch.log(array_conv)  # back to log scale
    return array_conv
def _label_stretch(array: torch.Tensor, rate: float):
    """Stretch the tensor containing pulses without varying pulse width."""
    # array.shape: (timestep, feature); output has ceil(timestep / rate) rows
    out_rows = math.ceil(array.shape[0] / rate)
    stretched = torch.zeros(out_rows, array.shape[1], dtype=array.dtype)
    nz_time, nz_feat = torch.nonzero(array, as_tuple=True)
    # Map each pulse through the center time of its source frame so the
    # pulse stays one frame wide after stretching.
    center_times = FRAME * nz_time + FRAME / 2
    new_time = torch.floor(center_times / rate / FRAME).long()
    stretched[(new_time, nz_feat)] = array[(nz_time, nz_feat)]
    return stretched
def time_stretch(data: Sample, min_rate: float, max_rate: float):
    """Apply time stretch to sample data.

    A rate is drawn uniformly from [min_rate, max_rate]; audio is stretched
    with _audio_stretch and pulse labels with _label_stretch, then both are
    truncated back to their original lengths.

    Notes
    -----
    rate = playback-speed multiplier (the original note said 「倍速」,
    i.e. "x-times speed")
    """
    # we don't consider shrinking operation (rate > 1)
    assert 0 < min_rate < max_rate <= 1
    audio = data["audio"]  # shape: (timestep, freq)
    onset = data["onset"]  # shape: (timestep, 1)
    rate = np.random.uniform(min_rate, max_rate)
    audio_conv = _audio_stretch(audio, rate)
    onset_conv = _label_stretch(onset, rate)
    # stretched arrays are longer (rate < 1): truncate to the original length
    data["audio"] = audio_conv[: audio.shape[0]]
    data["onset"] = onset_conv[: onset.shape[0]]
    if "beats" in data:
        beats = data["beats"]  # shape: (timestep, 1)
        beats_conv = _label_stretch(beats, rate)
        data["beats"] = beats_conv[: beats.shape[0]]
    return data
def abs_mel(data: Sample):
    """Replace the (log) mel spectrogram with its absolute value."""
    data["audio"] = data["audio"].abs()
    return data
def time_stretch_abs(data: Sample, min_rate: float, max_rate: float):
    """time_stretch followed by taking the absolute value of the audio."""
    stretched = time_stretch(data, min_rate, max_rate)
    stretched["audio"] = stretched["audio"].abs()
    return stretched
| 5,571 | 30.480226 | 94 | py |
AAAI-23.6040 | AAAI-23.6040-master/notes_generator/training/loader.py | import json
import random
import warnings
from pathlib import Path
from typing import Dict, Optional, Tuple
import numpy as np
import torch
from notes_generator.constants import *
from notes_generator.models.beats import gen_beats_array
from notes_generator.training import augmenation
def load(base_dir: Path, app_name: AppName):
    """Load one song's metadata (meta.json) and onset labels (dump.npz).

    Returns (metadata dict, {difficulty_value: label array}).
    """
    difficulty_types = get_difficulty_type_enum(app_name)
    with (base_dir / "meta.json").open() as fp:
        metadata = json.load(fp)
    with (base_dir / "dump.npz").open("rb") as fp:
        data = np.load(fp)
        # validation
        # check if all keys in data are defined in enum `difficulty_types`
        for key in data:
            assert (
                key in difficulty_types.__members__
            ), f"Wrong onset data: '{key}' is not defined in enum {difficulty_types}."
        # load (while the archive file is still open: npz access is lazy)
        scores = {d.value: data[d.name] for d in difficulty_types if d.name in data}
    return metadata, scores
def load_audio(base_dir: Path, npz_file_pointer):
    """Load mel-spectrogram metadata plus the npz archive from an open file."""
    meta = load_audio_meta(base_dir)
    arrays = np.load(npz_file_pointer)
    return meta, arrays
def load_audio_meta(base_dir: Path):
    """Read the meta.json located directly under base_dir."""
    with (base_dir / "meta.json").open() as fp:
        return json.load(fp)
def iter_array(array, length, skip, params):
    """Yield consecutive non-overlapping windows of `array`.

    Starting at index `skip`, windows of exactly `length` elements are
    produced as (window, start, end, params); a short trailing window is
    dropped.
    """
    total = len(array)
    start = skip
    while start < total:
        end = start + length
        window = array[start:end]
        if len(window) == length:
            yield window, start, end, params
        start = end
def convert_score(score):
    """Derive onset/offset/frame masks from an encoded label array.

    Parameters
    ----------
    score : np.ndarray
        Integer label array: value 3 marks an onset frame, 1 an offset frame,
        and any value > 1 a sounding frame.

    Returns
    -------
    dict
        Keys 'label' (the input array as-is), 'onset', 'offset' and 'frame'
        (float arrays of the same shape).

    Notes
    -----
    Uses the builtin ``float`` dtype: ``np.float`` was deprecated in
    NumPy 1.20 and removed in 1.24, where the original code raised
    AttributeError.
    """
    ret = dict(label=score)
    ret["onset"] = np.array((ret["label"] == 3), dtype=float)
    ret["offset"] = np.array((ret["label"] == 1), dtype=float)
    ret["frame"] = np.array((ret["label"] > 1), dtype=float)
    return ret
def assert_length(arr, length: int):
    """Force `arr` to exactly `length` rows: truncate if longer, zero-pad
    at the end if shorter."""
    if arr.shape[0] >= length:
        return arr[:length]
    padding = np.zeros((length - arr.shape[0], arr.shape[1]))
    return np.concatenate([arr, padding])
def validate_diff_types(diff_types, app_name):
    """Check that every requested difficulty value exists for the given app.

    Parameters
    ----------
    diff_types
        Iterable of integer difficulty values to validate.
    app_name : AppName
        The app whose difficulty enum defines the valid values.

    Returns
    -------
    The validated `diff_types`, unchanged.

    Raises
    ------
    ValueError
        If a value is not defined in the app's difficulty enum.  (Previously
        an ``assert``, which is silently stripped under ``python -O``.)
    """
    diff_type_enum = get_difficulty_type_enum(app_name)
    valid_values = {diff.value for diff in diff_type_enum}
    for d in diff_types:
        if d not in valid_values:
            raise ValueError(f"diff_type: {d} is not defined within {app_name}")
    return diff_types
class BaseLoader:
"""The base class for onset loader.
Parameters
----------
score_base_path : Path
A path to a directory containing onset labels.
Directories named live IDs are expected just below a level of the directory.
audio_base_path : Path
A path to a directory containing mel spectrogram data.
Directories named live IDs are expected just below a level of the directory.
device : Optional[str]
A desired device name where training is run. Default to `None`
seq_length : int
The desired length of the sequence loaded from this loader.
Default to `16000`.
(note) The length must be calculated in wav data form.
For example, if `10` frames are desired, specify `10 * FRAME` here.
skip_step : int
If `random_skip` is `False`, data will be loaded iteratively until the end,
shifting the start position by this value.
Default to `2000`.
(note) The length must be calculated in wav data form.
For example, if `10` frames are desired, specify `10 * FRAME` here.
In addition, skip_step must satisfy following condition:
`FRAME <= skip_step <= seq_length`
test_split : bool
If `True`, only specified live IDs to `data_ids` are loaded.
Otherwise, all live_IDs of existing train data are loaded.
Default to `True`.
aug_count : int
A desired number of augmented data to be included in loaded data.
Default to `0`.
diff_types : Tuple
Desired condition values to be loaded.
Condition values must be ones defined in `DifficultyType` like enum in constants.py.
Default to `(30,)`.
debug : bool
If `True`, more output will be displayed in console for debugging purpose.
Default to `False`
random_skip : bool
If `True`, at each iteration data will be loaded from random start position until the end.
Default to `True`.
with_other_condition : bool
If `True`, data for all conditions are included to 'other_conditions' key
in the return dict. Default to `False`.
with_beats : bool
If `True`, a beat array generated from `bpm_info` is included to 'beats' key
in the return dict. Default to `False`.
app_name : AppName
A desired app name of data to be loaded. Defalt to `AppName.STEPMANIA`.
data_ids : Optional[DataIds]
If `test_split` is `True`, data of live IDs specified here will be loaded.
If `None`, `default_validation_ids` and `default_test_ids` in constants.py are set to
validation IDs and test IDs respectively, and the other songs are set to train IDs.
Default to `None`.
augmentation_setting: Optional[str]
setting file.
"""
    def __init__(
        self,
        score_base_path: Path,
        audio_base_path: Path,
        device: Optional[str] = None,
        seq_length: int = 16000,
        skip_step: int = 2000,
        test_split: bool = True,
        aug_count: int = 0,
        diff_types: Tuple = (30,),
        debug: bool = False,
        random_skip: bool = True,
        with_other_condition: bool = False,
        with_beats: bool = False,
        distinguish_downbeats: bool = True,
        app_name: AppName = AppName.STEPMANIA,
        data_ids: Optional[DataIds] = None,
        augmentation_setting: Optional[str] = None,
    ):
        # See the class docstring for the meaning of each parameter.
        self.score_base_path = Path(score_base_path)
        self.audio_base_path = Path(audio_base_path)
        self.seq_length = seq_length
        self.skip_step = skip_step
        # `device` is kept only for backward compatibility and is forced to None.
        if device is not None:
            warnings.warn("Loader device parameter is obsolete. It should be None")
            device = None
        self.device = device
        self.aug_count = aug_count
        # Fail fast on difficulty values not defined for this app.
        self.diff_types = validate_diff_types(diff_types, app_name)
        self.debug = debug
        self.random_skip = random_skip
        self.with_other_condition = with_other_condition
        self.with_beats = with_beats
        self.distinguish_downbeats = distinguish_downbeats
        self.app_name = app_name
        # Optional YAML file describing data augmentations (see augmenation.py).
        if augmentation_setting:
            self.augmentations = augmenation.load_augmentations(Path(augmentation_setting))
        else:
            self.augmentations = None
        # Load every song's onset labels once; keys are live IDs.
        self._score_dict = self.load_dir(self.score_base_path)
        live_ids = list(self._score_dict.keys())
        self.live_ids = live_ids
        if test_split:
            if data_ids is None:
                data_ids = get_default_dataids(live_ids, app_name)
            # The three splits must be pairwise disjoint.
            assert set(data_ids.train_ids) & set(data_ids.validation_ids) == set()
            assert set(data_ids.train_ids) & set(data_ids.test_ids) == set()
            assert set(data_ids.test_ids) & set(data_ids.validation_ids) == set()
            self.train_ids = data_ids.train_ids
            self.test_ids = data_ids.test_ids
            self.validation_ids = data_ids.validation_ids
        else:
            # No split: every song is used for training.
            self.train_ids = sorted(live_ids)
            self.test_ids = []
            self.validation_ids = []
def load_dir(self, base_path: Path):
"""Load onset data for all live IDs.
Parameters
----------
base_path : Path
A path to a directory containing onset labels.
Directories named live IDs are expected just below a level of the directory.
Returns
-------
score_dict : Dict[int, Tuple[Dict, Dict]]
"""
score_dict = dict()
for e in base_path.iterdir():
if not e.is_dir():
continue
metadata, scores = load(e, self.app_name)
live_id = int(metadata["live_id"])
score_dict[live_id] = metadata, scores
return score_dict
def dataset(self, mode: str = "all", is_shuffle: bool = False, shuffle_buffer_size: int = 500):
"""Return Dataset class in the specified mode.
Parameters
----------
mode : str
A mode of the loader, which should be one of 'train', 'validation' of 'test'.
The data of live IDs specified to `data_ids` in a loader of corresponding mode
will be loaded.
is_shuffle : bool
If `True`, this method will return a loader which loads each data in shuffled manner.
shuffle_buffer_size : int
If `is_shuffle` is `True`, a returned loader will once load as many data as
the number of buffer size in normal order, and then shuffle the data and return.
Returns
-------
Type[IterableDataset]
"""
if is_shuffle:
return ShuffleDataset(self, shuffle_buffer_size, mode)
return ScoreDataset(self, mode)
def iter(self, train_or_test: str):
"""Iterate all data.
Parameters
----------
train_or_test : str
The set name of live IDs loaded in this loader.
The value must be one of 'train', 'validation' or 'test.
Returns
-------
"""
if train_or_test == "train":
ids = self.train_ids
elif train_or_test == "validation":
ids = self.validation_ids
elif train_or_test == "test":
ids = self.test_ids
else:
raise ValueError()
for id_ in ids:
yield from self.iter_live(id_, train_or_test)
def iter_live(self, live_id, train_or_test: str):
"""Iterate songs
Parameters
----------
live_id : int
A live ID of data to be loaded.
train_or_test : str
The set name of live IDs loaded in this loader.
The value must be one of 'train', 'validation' or 'test.
Returns
-------
"""
raise NotImplementedError
    def iter_audio(self, live_id, train_or_test: str):
        """Iterate audio

        Yields fixed-length mel-spectrogram segments (via ``iter_array``) for one song.

        Parameters
        ----------
        live_id : int
            A live ID of data to be loaded.
        train_or_test : str
            The set name of live IDs loaded in this loader.
            The value must be one of 'train', 'validation' or 'test'.

        Returns
        -------
        """
        path = self.audio_base_path / str(live_id)
        npz_path = path / "mel.npz"
        with npz_path.open("rb") as fpt:
            metadata, mel_data = load_audio(path, fpt)
        keys = ["mel"]
        # Noise-augmented spectrograms are mixed in during training only.
        if self.aug_count > 0 and train_or_test == "train":
            keys += [f"mel_noise_{idx}" for idx in range(self.aug_count)]
        for key in keys:
            # Convert segment length from milliseconds to frames.
            seq_length = int(round(self.seq_length / FRAME))
            song_length = len(mel_data[key])  # NOTE(review): unused - presumably kept for debugging
            if self.random_skip and train_or_test == "train":
                # Random start offset so segment boundaries vary between epochs.
                skip = np.random.randint(0, seq_length)
                params = dict(key=key, skip=skip, seq_length=seq_length, live_id=live_id)
                yield from iter_array(mel_data[key], seq_length, skip, params)
            else:
                # Deterministic overlapping windows shifted by skip_step frames.
                skip_step = int(round(self.skip_step / FRAME))
                for skip in range(0, seq_length, skip_step):
                    params = dict(key=key, skip=skip, seq_length=seq_length, live_id=live_id)
                    yield from iter_array(mel_data[key], seq_length, skip, params)
class OnsetLoader(BaseLoader):
    """Loader that yields fixed-length (audio, onset) segments for training."""

    def cut_segment(self, score_data: np.ndarray, start_index: int, end_index: int, length: int):
        """Ensure the length of data array.

        Parameters
        ----------
        score_data : np.ndarray
            An array containing score data where the length will be adjusted.
        start_index : int
            The start index to cut the array.
        end_index : int
            The end index to cut the array.
        length : int
            The length of the array to be returned.

        Returns
        -------
        np.ndarray
        """
        # Cut the score data by specified length
        score_segment = score_data[start_index:end_index]
        # Pad if the length is insufficient
        score_segment = assert_length(score_segment, length)
        return score_segment

    def handle_other_conditions(
        self, score_dict: Dict, start_index: int, end_index: int, length: int
    ):
        """Return a dict containing data of all conditions.

        Parameters
        ----------
        score_dict : Dict
            A dict whose key is expected to be condition value,
            and whose value is expected to be an array containing score data.
        start_index : int
            The start index to cut the arrays in `score_dict`.
        end_index : int
            The end index to cut the arrays in `score_dict`.
        length : int
            The length of arrays for each conditions to be returned.

        Returns
        -------
        Dict
        """
        # Add data of other difficulties for a convenience
        dic = {
            diff_type: torch.from_numpy(
                self.cut_segment(score_dict[diff_type], start_index, end_index, length)
            ).float()
            for diff_type in self.diff_types
            if diff_type in score_dict.keys()
        }
        return dic

    def get_bpm_info(self, live_id):
        """Return bpm information for specified live ID.

        Parameters
        ----------
        live_id : int
            A live ID whose bpm information will be returned.

        Returns
        -------
        """
        path = self.audio_base_path / str(live_id)
        metadata = load_audio_meta(path)
        return metadata["bpm_info"]

    def iter_live(self, live_id, train_or_test: str):
        """Iterate songs

        Parameters
        ----------
        live_id : int
            A live ID of data to be loaded.
        train_or_test : str
            The set name of live IDs loaded in this loader.
            The value must be one of 'train', 'validation' or 'test'.

        Returns
        -------
        """
        # score_dic = {10: array([[0.], ...]), 20: array([[0.], ...]), ...}
        _, score_dic = self._score_dict[live_id]
        bpm_info = self.get_bpm_info(live_id)
        onsets_array_len = len(list(score_dic.values())[0])
        audio_path = self.audio_base_path / str(live_id)
        audio_meta = load_audio_meta(audio_path)
        mel_len = round(audio_meta["mel_length"] * 1000)  # ms
        # Beat grid shared by every difficulty of this song.
        beats_array = gen_beats_array(
            onsets_array_len, bpm_info, mel_len, self.distinguish_downbeats
        )
        for diff_type in self.diff_types:
            condition = diff_type
            if diff_type in score_dic:
                score_data = score_dic.get(diff_type)
            else:
                # This song has no chart for the current difficulty.
                continue
            for audio, start_index, end_index, params in self.iter_audio(live_id, train_or_test):
                score_segment = self.cut_segment(score_data, start_index, end_index, audio.shape[0])
                # Convert to the form of start, end, frame
                data = dict(
                    condition=torch.Tensor([condition]),  # difficulty
                    onset=torch.from_numpy(score_segment).float(),
                    audio=torch.from_numpy(audio).float(),  # audio (mel-spectrogram)
                )
                if self.with_other_condition:
                    data["other_conditions"] = self.handle_other_conditions(
                        score_dic, start_index, end_index, audio.shape[0]
                    )
                if self.with_beats:
                    # beat array(2 at downbeats, 1 at other beats)
                    data["beats"] = torch.from_numpy(
                        self.cut_segment(beats_array, start_index, end_index, audio.shape[0])
                    ).float()
                if self.augmentations and train_or_test == "train":
                    data = augmenation.apply_augmentation(self.augmentations, data)
                if self.debug:
                    data.update(params)
                    data["start_index"] = start_index
                    data["end_index"] = end_index
                yield data
class ScoreDataset(torch.utils.data.IterableDataset):
    """IterableDataset facade over a loader fixed to one split mode."""

    def __init__(self, score_loader: BaseLoader, mode: str):
        self.score_loader = score_loader
        self.mode = mode

    def __iter__(self):
        # Delegate straight to the loader's generator for the configured split.
        yield from self.score_loader.iter(self.mode)
# https://discuss.pytorch.org/t/how-to-shuffle-an-iterable-dataset/64130
class ShuffleDataset(torch.utils.data.IterableDataset):
    """Buffered-shuffle wrapper around a loader split.

    Fills a buffer of `buffer_size` items, then for every further item yields
    a random buffered element and replaces it, finally draining the buffer.
    """

    def __init__(self, score_loader: BaseLoader, buffer_size: int, mode: str):
        self.score_loader = score_loader
        self.buffer_size = buffer_size
        self.mode = mode

    def __iter__(self):
        buf = []
        source = iter(self.score_loader.iter(self.mode))
        try:
            # Pre-fill; shrink the buffer if the source is shorter than it.
            while len(buf) < self.buffer_size:
                try:
                    buf.append(next(source))
                except StopIteration:
                    self.buffer_size = len(buf)
                    break
            # Evict a random slot for each incoming item.
            for item in source:
                evict_idx = random.randint(0, self.buffer_size - 1)
                yield buf[evict_idx]
                buf[evict_idx] = item
            # Drain whatever remains (in reverse insertion order).
            while buf:
                yield buf.pop()
        except GeneratorExit:
            pass
class OnsetTestDataset(torch.utils.data.Dataset):
    """Map-style dataset that yields one whole song per item for evaluation."""

    def __init__(
        self,
        score_base_path: Path,
        audio_base_path: Path,
        live_ids: List[int],
        device: str = None,
        diff_type: int = 30,
        with_beats: bool = True,
        app_name: AppName = AppName.STEPMANIA,
    ):
        super().__init__()
        self.score_base_path = score_base_path
        self.audio_base_path = audio_base_path
        self.live_ids = live_ids
        self.device = device
        self.diff_type = diff_type
        self.with_beats = with_beats
        self.app_name = app_name
        # Songs without a chart for `diff_type` are dropped here (KeyError path below).
        self.score_dict = self.load_score_dict()
        self.exist_live_ids = list(self.score_dict.keys())

    def load_score_dict(self):
        # Keep only live IDs that actually provide the requested difficulty.
        score_dict = dict()
        for live_id in self.live_ids:
            try:
                score = self.load_score(live_id)
                score_dict[live_id] = score
            except KeyError:
                continue
        return score_dict

    def load_audio(self, live_id):
        # Load the precomputed mel-spectrogram and its metadata for one song.
        audio_dir = self.audio_base_path / str(live_id)
        npz_path = audio_dir / "mel.npz"
        with npz_path.open("rb") as fpt:
            metadata, mel_data = load_audio(audio_dir, fpt)
        audio = mel_data["mel"]
        return audio, metadata

    def load_score(self, live_id):
        # Raises KeyError when the difficulty is missing; handled in load_score_dict.
        score_dir = self.score_base_path / str(live_id)
        metadata, scores = load(score_dir, self.app_name)
        return scores[self.diff_type]

    def __getitem__(self, index):
        live_id = self.exist_live_ids[index]
        # load audio
        audio, audio_meta = self.load_audio(live_id)
        audio_len = len(audio)
        bpm_info = audio_meta["bpm_info"]
        # load score
        score = self.score_dict[live_id]
        score = assert_length(score, audio_len)
        data = {
            "live_id": live_id,
            "audio": torch.from_numpy(audio).float(),
            "onset": torch.from_numpy(score).float(),
            "condition": torch.tensor([self.diff_type]),
        }
        if self.with_beats:
            # NOTE(review): the third argument is a millisecond length in
            # OnsetLoader.iter_live but a frame count here - confirm intended.
            beats_array = gen_beats_array(audio_len, bpm_info, audio_len)
            data["beats"] = torch.from_numpy(beats_array).float()
        return data

    def __len__(self):
        return len(self.exist_live_ids)
| 20,358 | 34.101724 | 100 | py |
AAAI-23.6040 | AAAI-23.6040-master/notes_generator/training/train.py | import math
import shutil
import typing
from logging import getLogger
from pathlib import Path
import mlflow
import numpy as np
import torch
from ignite.engine import Engine, Events
from ignite.handlers import Checkpoint, DiskSaver, EarlyStopping, ModelCheckpoint
from ignite.metrics import Average
from torch.nn.utils import clip_grad_norm_
from notes_generator.training.evaluate import evaluate
logger = getLogger(__name__)
def cycle(iterable):
    """Yield items from `iterable` endlessly, restarting it on exhaustion."""
    while True:
        yield from iterable
def write_metrics(metrics, writer, mode: str, epoch: int, disable_mlflow: bool):
    """Log averaged losses to the logger, TensorBoard, and (optionally) MLflow."""
    loss = metrics["loss"]
    logger.info(f"{mode} Results - Epoch: {epoch} Avg loss: {loss:.4f}")
    entries = [
        (f"{mode}-avg_loss", loss),
        (f"{mode}-avg_loss_onset", metrics["loss-onset"]),
    ]
    if "loss-notes" in metrics:
        entries.append((f"{mode}-avg_loss_notes", metrics["loss-notes"]))
    # tensorboard
    for key, value in entries:
        writer.add_scalar(key, value, epoch)
    # mlflow
    if disable_mlflow:
        return
    for key, value in entries:
        mlflow.log_metric(key, value, epoch)
def score_function(engine):
    """EarlyStopping score: negated validation loss, so higher is better."""
    return -engine.state.metrics["loss"]
def train_ignite(
    epochs,
    model,
    log_dir,
    batch_size,
    train_loader,
    valid_loader,
    optimizer,
    lr_scheduler,
    writer,
    device,
    onset_only: bool = True,
    fuzzy_width: int = 1,
    fuzzy_scale: float = 1.0,
    merge_scale: typing.Optional[float] = None,
    patience: int = 10,
    enable_early_stop: bool = True,
    disable_eval: bool = False,
    resume_checkpoint: int = None,
    lr_find: bool = False,
    epoch_length=100,
    start_lr=1e-7,
    end_lr=1e-1,
    clip_gradient_norm: float = 3,
    loss_interval: int = 100,
    validation_interval: int = 200,
    checkpoint_interval: int = 200,
    n_saved_checkpoint: int = 10,
    n_saved_model: int = 40,
    disable_mlflow: bool = False,
    warmup_steps: int = 0,
    send_model: bool = False,
    eval_tolerance: float = 0.05,
):
    """Train `model` with pytorch-ignite.

    Sets up an ignite trainer/evaluator pair with gradient clipping, optional
    LR warmup, LR-finder mode, periodic validation, early stopping, and
    checkpointing. Metrics go to TensorBoard via `writer` and - unless
    `disable_mlflow` is set - to MLflow.

    Fix: the "learning_rate" MLflow metric is now guarded by `disable_mlflow`,
    consistent with every other MLflow call in this function.
    """
    if lr_find:
        # LR-finder mode: sweep the LR exponentially from start_lr to end_lr
        # over two epochs and record the smoothed loss curve.
        lr_find_loss = []
        lr_find_lr = []
        optimizer = torch.optim.Adam(model.parameters(), start_lr)
        lr_find_epochs = 2
        lr_lambda = lambda x: math.exp(
            x * math.log(end_lr / start_lr) / (lr_find_epochs * epoch_length)
        )
        lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
        epochs = lr_find_epochs
        smoothing = 0.05

    def update_model(engine, batch):
        # Linear LR warmup towards target_lr for the first warmup_steps iterations.
        if warmup_steps > 0 and engine.state.iteration < warmup_steps:
            lr_scale = min(1.0, float(engine.state.iteration + 1) / float(warmup_steps))
            for pg in optimizer.param_groups:
                pg["lr"] = lr_scale * target_lr
        optimizer.zero_grad()
        predictions, losses = model.run_on_batch(batch, fuzzy_width, fuzzy_scale, merge_scale)
        loss = sum(losses.values())
        loss.backward()
        clip_grad_norm_(model.parameters(), clip_gradient_norm)
        optimizer.step()
        if lr_scheduler:
            if warmup_steps > 0 and engine.state.iteration < warmup_steps:
                # Scheduler is frozen during warmup; warmup sets the LR itself.
                pass
            else:
                lr_scheduler.step()
        if lr_find:
            loss_v = loss.item()
            i = engine.state.iteration
            lr_step = optimizer.state_dict()["param_groups"][0]["lr"]
            lr_find_lr.append(lr_step)
            if i == 1:
                lr_find_loss.append(loss_v)
            else:
                # Exponential smoothing of the loss for a readable LR curve.
                loss_v = smoothing * loss_v + (1 - smoothing) * lr_find_loss[-1]
                lr_find_loss.append(loss_v)
        losses = {key: value.item() for key, value in {"loss": loss, **losses}.items()}
        i = engine.state.iteration
        for key, value in losses.items():
            # tensorboard
            writer.add_scalar(key, value, global_step=i)
            # mlflow
            if not disable_mlflow:
                mlflow.log_metric(key, value, step=i)
        return predictions, losses

    def evaluate_func(engine, batch):
        model.eval()
        with torch.no_grad():
            predictions, losses = model.run_on_batch(batch)
            loss = sum(losses.values())
            losses = {key: value.item() for key, value in {"loss": loss, **losses}.items()}
        model.train()
        return predictions, losses

    trainer = Engine(update_model)
    evaluator = Engine(evaluate_func)
    # Target LR for warmup scaling (last param group, as configured by caller).
    target_lr = [pg["lr"] for pg in optimizer.param_groups][-1]
    checkpoint = Path(log_dir) / "checkpoint"

    @trainer.on(Events.STARTED)
    def resume_training(engine):
        if resume_checkpoint:
            engine.state.iteration = resume_checkpoint
            engine.state.epoch = int(resume_checkpoint / engine.state.epoch_length)

    @trainer.on(Events.COMPLETED)
    def log_model(engine):
        if not send_model or disable_mlflow:
            return
        mlflow.pytorch.log_model(model, "model")
        # Also upload every checkpointed model state, tagged with its step.
        for model_path in sorted(checkpoint.rglob("model*")):
            step = str(model_path).split(".")[0].split("_")[-1]
            model.load_state_dict(torch.load(checkpoint / model_path))
            mlflow.pytorch.log_model(model, f"model_{step}")

    @trainer.on(Events.COMPLETED)
    def write_lr_find(engine):
        if not lr_find:
            return
        import matplotlib.pyplot as plt

        fig = plt.figure()
        plt.plot(lr_find_lr, lr_find_loss)
        plt.xscale("log")
        plt.show()

    @trainer.on(Events.ITERATION_COMPLETED(every=loss_interval))
    def log_training_loss(engine):
        loss = engine.state.output[1]["loss"]
        iteration_max = engine.state.max_epochs * engine.state.epoch_length
        logger.info(f"Iteration[{engine.state.iteration}/{iteration_max}] " f"Loss: {loss:.4f}")

    @trainer.on(Events.ITERATION_COMPLETED(every=validation_interval))
    def log_validation_results(engine):
        i = engine.state.iteration
        lr = [pg["lr"] for pg in optimizer.param_groups][-1]
        writer.add_scalar("learning_rate", lr, global_step=i)
        # Fixed: honor disable_mlflow here too (was an unconditional call).
        if not disable_mlflow:
            mlflow.log_metric("learning_rate", lr, step=i)
        evaluator.run(cycle(valid_loader), epoch_length=epoch_length_valid)
        model.eval()
        with torch.no_grad():
            if disable_eval:
                pass
            else:
                for key, value in evaluate(model, valid_loader, device, eval_tolerance).items():
                    k = "validation-" + key.replace(" ", "_")
                    v = np.mean(value)
                    # tensorboard
                    writer.add_scalar(k, v, global_step=i)
                    # mlflow
                    if not disable_mlflow:
                        mlflow.log_metric(k, v, step=i)
        metrics = evaluator.state.metrics
        write_metrics(metrics, writer, "validation", engine.state.epoch, disable_mlflow)
        model.train()

    avg_loss = Average(output_transform=lambda output: output[1]["loss"])
    avg_loss_onset = Average(output_transform=lambda output: output[1]["loss-onset"])
    if not onset_only:
        avg_loss_notes = Average(output_transform=lambda output: output[1]["loss-notes"])
        avg_loss_notes.attach(evaluator, "loss-notes")
    avg_loss.attach(trainer, "loss")
    avg_loss.attach(evaluator, "loss")
    avg_loss_onset.attach(evaluator, "loss-onset")
    if enable_early_stop:
        handler = EarlyStopping(patience=patience, score_function=score_function, trainer=trainer)
        evaluator.add_event_handler(Events.COMPLETED, handler)
    to_save = {"trainer": trainer, "optimizer": optimizer}
    if lr_scheduler:
        to_save["lr_scheduler"] = lr_scheduler
    # Start from a clean checkpoint dir unless we are resuming.
    if checkpoint.exists() and not resume_checkpoint:
        shutil.rmtree(str(checkpoint))
    handler = Checkpoint(
        to_save,
        DiskSaver(str(checkpoint), create_dir=True, require_empty=False),
        n_saved=n_saved_checkpoint,
    )
    trainer.add_event_handler(Events.ITERATION_COMPLETED(every=checkpoint_interval), handler)
    model_handler = ModelCheckpoint(
        dirname=str(checkpoint),
        filename_prefix="model",
        n_saved=n_saved_model,
        create_dir=True,
        require_empty=False,
    )
    trainer.add_event_handler(
        Events.ITERATION_COMPLETED(every=checkpoint_interval), model_handler, {"mymodel": model}
    )
    epoch_length_valid = len(list(valid_loader))
    if resume_checkpoint:
        check_point_path = f"{str(checkpoint)}/checkpoint_{resume_checkpoint}.pth"
        model_state_path = f"{str(checkpoint)}/model_mymodel_{resume_checkpoint}.pth"
        to_load = {"trainer": trainer, "optimizer": optimizer, "lr_scheduler": lr_scheduler}
        checkpoint_ = torch.load(check_point_path, map_location=torch.device(device))
        Checkpoint.load_objects(to_load=to_load, checkpoint=checkpoint_)
        model_state = torch.load(model_state_path, map_location=torch.device(device))
        model.load_state_dict(model_state)
        # release memory
        del model_state
        del checkpoint_
    logger.info(f"epoch_length: {epoch_length} epoch_length_valid: {epoch_length_valid}")
    trainer.run(cycle(train_loader), max_epochs=epochs, epoch_length=epoch_length)
| 9,367 | 36.774194 | 98 | py |
AAAI-23.6040 | AAAI-23.6040-master/notes_generator/models/merge_labels.py | import typing
import torch
def merge_labels(onset_label: torch.Tensor, batch: typing.Dict, scale: float) -> torch.Tensor:
    """Merge onset labels of the other difficulty conditions into `onset_label`.

    At each frame the result is ``max(onset_label, scale * other_label)`` over
    every score in ``batch["other_conditions"]``.

    Parameters
    ----------
    onset_label : torch.Tensor
        Onset label tensor, e.g. shape (frame_len, 1) or batched.
    batch : typing.Dict
        Minibatch dict; must contain "other_conditions", a mapping from
        condition value to a score tensor broadcastable with `onset_label`.
    scale : float
        Scale applied to the other conditions' labels before merging.

    Returns
    -------
    torch.Tensor
    """
    assert "other_conditions" in batch
    # Only the score tensors matter here; the condition keys are unused.
    for score in batch["other_conditions"].values():
        onset_label = torch.max(onset_label, score * scale)
    return onset_label
| 350 | 28.25 | 94 | py |
AAAI-23.6040 | AAAI-23.6040-master/notes_generator/models/fuzzy_label.py | import torch
import torch.nn.functional as F
from notes_generator.models.util import round_decimal
def shift(ar, size, med):
    """Shift a 1-D onset vector `size` steps both ways and weight by (med - size).

    [0, 0, 0, 1, 0, 0...] -> [0, 0, med - 1, 0, med - 1, 0 ...]
    """
    if size > 0:
        right = F.pad(ar[size:], [0, size])
        left = F.pad(ar[:-size], [size, 0])
        ar = right + left
    return ar * (med - size)
def gauss(ar, width=3, scale=1.0):
    """Spread one-hot onsets into a Gaussian-shaped neighborhood.

    [0, 0, 0, 1, 0, 0...] -> distances [0, 1, 2, 3, 2, 1...] mapped through a
    Gaussian centered on the onset; original onsets keep their full value.
    """
    stacked = torch.stack([shift(ar, offset, width) for offset in range(width)])
    peak = torch.max(stacked, dim=0)[0]
    zero_mask = peak == 0
    # Heuristically define std value so that non-zero value appears
    # within a range of `width` parameter of the center when the values
    # are rounded at the second decimal place.
    std = width / 3
    var = std**2
    curve = torch.exp(-((peak - width) ** 2) / (2 * var)) * scale
    return torch.max(curve.masked_fill(zero_mask, 0), ar.float())
def fuzzy_label(onset_label: torch.Tensor, width: int, scale: float = 1.0) -> torch.Tensor:
    """Apply fuzzy label

    Parameters
    ----------
    onset_label : torch.Tensor shape = [frame_len, 1]
    width : int
        Neighborhood width of the Gaussian smoothing; must be positive.
    scale : float
        Peak scale of the smoothed neighborhood, in [0, 1]. default: `1.0`
        (Previously this parameter had no default, which made the documented
        example below raise TypeError; the default is backward compatible.)

    Returns
    -------
    fuzzy_labeled_onset : torch.Tensor shape = [frame_len, 1]

    Examples
    >>> onset = torch.tensor([[0.], [0.], [1.], [0.], [1.], [0.], [0.], [0.]])
    >>> fuzzy_label(onset, width=2)  # doctest: +SKIP
    tensor([[0.0100], [0.3200], [1.0000], [0.3200], [1.0000], [0.3200],
            [0.0100], [0.0000]])

    NOTE(review): the example output above predates the current gauss()
    implementation - regenerate the expected values before relying on them.
    """
    assert width > 0
    assert 0 <= scale <= 1
    # Smooth, round to 2 decimals, and restore the (frame_len, 1) shape.
    return round_decimal(gauss(onset_label.view(-1), width, scale), 2).view(len(onset_label), 1)
def fuzzy_on_batch(batch: torch.Tensor, width: int, scale: float) -> torch.Tensor:
    """Apply fuzzy label to batch

    Parameters
    ----------
    batch : torch.Tensor shape = [batch_size, frame_len, 1]
    width : int
    scale : float

    Returns
    -------
    fuzzy_labeled_onsets : torch.Tensor shape = [batch_size, frame_len, 1]
    """
    # Smooth each sequence independently and restack along the batch axis.
    return torch.stack(
        [fuzzy_label(batch[idx], width, scale) for idx in range(batch.shape[0])]
    )
| 2,107 | 27.876712 | 96 | py |
AAAI-23.6040 | AAAI-23.6040-master/notes_generator/models/onsets.py | import typing
import torch
from torch import nn
from torch.nn import functional as F
from notes_generator.constants import *
from notes_generator.layers.base_layers import BiLSTM, get_conv_stack
from notes_generator.models.fuzzy_label import fuzzy_on_batch
from notes_generator.models.merge_labels import merge_labels
from notes_generator.models.util import batch_first
class OnsetsBase(nn.Module):
    """The base class for onset prediction model"""

    def predict(self, batch: typing.Dict[str, torch.Tensor]):
        """Predict an onset score.

        Parameters
        ----------
        batch : typing.Dict[str, torch.Tensor]
            The Dict containing tensors below:
                * audio
                * onset
                * conditions
                * beats

        Returns
        -------
        pred : torch.Tensor
            The tensor of shape (batch_size, seq_len, output_features)
            containing predicted onset score.
            `output_features` defaults to `1`.
        """
        pred, _ = self.predict_with_probs(batch)
        return pred

    def predict_with_probs(self, batch: typing.Dict[str, torch.Tensor]):
        """Predict an onset score with a probability

        Parameters
        ----------
        batch : typing.Dict[str, torch.Tensor]
            The Dict containing tensors below:
                * audio
                * onset
                * conditions
                * beats

        Returns
        -------
        pred : torch.Tensor
            The tensor of shape (batch_size, seq_len, output_features)
            containing predicted onset score.
        proba : torch.Tensor
            The tensor of shape (batch_size, seq_len, output_features)
            containing predicted probability of onset score on each frames.
        """
        device = next(self.parameters()).device
        mel = batch_first(batch["audio"]).to(device)
        # Broadcast the per-sample difficulty condition over every frame.
        condition = batch["condition"].expand(
            (
                mel.shape[0],
                mel.shape[1],
            )
        )
        condition = condition.reshape(-1, condition.shape[-1], 1).to(device)
        if self.enable_beats:
            beats = batch["beats"].reshape(mel.shape[0], mel.shape[1], -1).to(device)
        else:
            beats = None
        self.eval()
        with torch.no_grad():
            probs = self(mel, condition, beats)
        # Binarize at 0.5 while also returning the raw probabilities.
        return probs > 0.5, probs

    def run_on_batch(
        self,
        batch: typing.Dict[str, torch.Tensor],
        fuzzy_width: int = 1,
        fuzzy_scale: float = 1.0,
        merge_scale: typing.Optional[float] = None,
        net: typing.Optional[nn.Module] = None,
    ):
        """Forward training on one minibatch

        Parameters
        ----------
        batch : typing.Dict[str, torch.Tensor]
            The Dict containing minibatch tensors below:
                * audio
                * onset
                * conditions
                * beats
        fuzzy_width : int
            The width of fuzzy labeling applied to notes_label.
            default: `1`
        fuzzy_scale : float
            The scale of fuzzy labeling applied to notes_label.
            The value should be within an interval `[0, 1]`.
            default: `1.0`
        merge_scale : typing.Optional[float] = None,
            If nonzero, mix the label of other conditions in specified scale.
            Formally, at each time step use the label calculated as below:
                max(onset_label, merge_scale * onset_label_in_other_conditions)
            default: `None`
        net : typing.Optional[nn.Module] = None,
            If not `None`, use specified model for forward propagation.
            default: `None`

        Returns
        -------
        g_loss : typing.Dict
            The Dict containing losses evaluated for current iteration.
        """
        device = next(self.parameters()).device
        audio_label = batch["audio"].to(device)
        onset_label = batch["onset"].to(device)
        # reshape batch first
        audio_label = batch_first(audio_label)
        if self.enable_condition:
            condition = batch["condition"].expand(
                (
                    audio_label.shape[0],
                    audio_label.shape[1],
                )
            )
            condition = condition.reshape(-1, condition.shape[-1], 1).to(device)
        else:
            condition = None
        if self.enable_beats:
            beats = (
                batch["beats"].reshape(audio_label.shape[0], audio_label.shape[1], -1).to(device)
            )
        else:
            beats = None
        if net is None:
            net = self
        if self.onset_weight:
            # Positive-class weight for the BCE loss.
            weight_onset = torch.tensor([self.onset_weight]).float().to(audio_label.device)
        else:
            weight_onset = None
        # Label smoothing / merging are applied during training only.
        if fuzzy_width > 1 and self.training:
            onset_label = fuzzy_on_batch(onset_label, fuzzy_width, fuzzy_scale)
        if merge_scale and self.training:
            onset_label = merge_labels(onset_label, batch, merge_scale)
        onset_pred = net(audio_label, condition, beats)
        predictions = {
            "onset": onset_pred.reshape(*onset_label.shape),
        }
        losses = {
            "loss-onset": F.binary_cross_entropy(
                predictions["onset"], onset_label, weight=weight_onset
            ),
        }
        return predictions, losses
class SimpleOnsets(OnsetsBase):
    """Model for onset prediction

    Parameters
    ----------
    input_features : int
        Size of each input sample
    output_features : int
        Size of each output sample. In principle, set the value to `1`.
    inference_chunk_length : int
        Size of the chunk length used for inference, normally is sequence_length/FRAME
    model_complexity : int
        Number of channels defining convolution stack. default: `48`
    num_layers : int
        Number of recurrent layers. default: `1`
    enable_condition : bool
        If `True`, the game difficulty level will be provided to a model.
        default: `False`
    enable_beats : bool
        If `True`, beats information will be provided to a model.
        default: `False`
    dropout : float
        The rate of a Dropout layer before the linear layer.
        default: `0.5`
    onset_weight: typing.Optional[int]
        The scale factor multiplied to the loss calculated in training.
        default: `None`
    conv_stack_type: ConvStackType
        The type of ConvStack.
        default: `ConvStackType.v1`
    rnn_dropout: float
        The rate of Dropout layers of the RNN layer.
        default: 0.0
    """

    def __init__(
        self,
        input_features,
        output_features,
        inference_chunk_length: int = 640,
        model_complexity: int = 48,
        num_layers: int = 1,
        enable_condition: bool = False,
        enable_beats: bool = False,
        dropout: float = 0.5,
        onset_weight: typing.Optional[int] = None,
        conv_stack_type: ConvStackType = ConvStackType.v1,
        rnn_dropout: float = 0.0,
    ):
        super().__init__()
        model_size = model_complexity * 16
        self.enable_condition = enable_condition
        self.enable_beats = enable_beats
        self.onset_weight = onset_weight
        # Extra input features appended after the conv stack.
        condition_length = 0
        beats_length = 0
        if self.enable_condition:
            condition_length = 1
        if self.enable_beats:
            beats_length = 1
        self.conv_stack_type = conv_stack_type
        self.onset_stack = get_conv_stack(conv_stack_type, input_features, model_size)
        self.onset_sequence = BiLSTM(
            model_size + condition_length + beats_length,
            model_size // 2,
            inference_chunk_length=inference_chunk_length,
            num_layers=num_layers,
            dropout=rnn_dropout,
        )
        self.drop = nn.Dropout(dropout)
        self.onset_linear = nn.Sequential(nn.Linear(model_size, output_features), nn.Sigmoid())

    def forward(self, mel, condition=None, beats=None):
        """
        Parameters
        ----------
        mel : torch.Tensor
            Tensor of shape (batch_size, seq_len, input_features)
            containing the log-scaled melspectrogram audio data.
        condition : torch.Tensor
            Tensor of shape (batch_size, seq_len, 1)
            containing the game difficulty level.
        beats : torch.Tensor
            Tensor of shape (batch_size, seq_len, 1)
            containing the beats information.

        Returns
        -------
        output: torch.Tensor
            Tensor of shape (batch_size, 1, output_features)
        """
        onset_pred = self.onset_stack(mel)
        # Concatenate optional per-frame side information before the RNN.
        if self.enable_condition:
            onset_pred = torch.cat([onset_pred, condition], dim=-1)
        if self.enable_beats:
            onset_pred = torch.cat([onset_pred, beats], dim=-1)
        onset_pred = self.onset_sequence(onset_pred)
        onset_pred = self.drop(onset_pred)
        onset_pred = self.onset_linear(onset_pred)
        return onset_pred
| 9,076 | 33.25283 | 97 | py |
AAAI-23.6040 | AAAI-23.6040-master/notes_generator/models/util.py | import typing
import torch
from torch import nn
def round_decimal(x: torch.Tensor, n_dig: int) -> torch.Tensor:
    """Round `x` element-wise to `n_dig` decimal places."""
    factor = 10**n_dig
    return torch.round(x * factor) / factor
def batch_first(data):
    """Reshape `data` to (-1, *trailing_dims); trailing dims are preserved."""
    trailing = list(data.shape[1:])
    return data.reshape(-1, *trailing)
def initialize_weights(m):
    """Apply Xavier-uniform init to modules with a multi-dimensional weight."""
    if not hasattr(m, "weight"):
        return
    if m.weight.dim() > 1:
        nn.init.xavier_uniform_(m.weight.data)
def convert1d(target):
    """Encode the trailing (left, right) pair into one index: left * 8 + right."""
    left_part = target[:, :, 0]
    right_part = target[:, :, 1]
    return left_part * 8 + right_part
class MyDataParallel(nn.DataParallel):
    """DataParallel wrapper that forwards model-specific helpers to the wrapped module."""

    def run_on_batch(
        self, batch, fuzzy_width=1, fuzzy_scale=1.0, merge_scale: typing.Optional[float] = None
    ):
        # Delegate to the wrapped module, but pass `self` as the forward net so
        # the actual forward pass goes through DataParallel's scatter/gather.
        return self.module.run_on_batch(
            batch,
            fuzzy_width=fuzzy_width,
            fuzzy_scale=fuzzy_scale,
            merge_scale=merge_scale,
            net=self,
        )

    def predict(self, batch):
        # Inference is delegated straight to the wrapped module.
        return self.module.predict(batch)

    def state_dict(self, destination=None, prefix="", keep_vars=False):
        # Save the inner module's state so keys have no "module." prefix.
        return self.module.state_dict(destination, prefix, keep_vars)

    def load_state_dict(self, state_dict, strict=True):
        self.module.load_state_dict(state_dict, strict)
| 1,220 | 24.4375 | 95 | py |
AAAI-23.6040 | AAAI-23.6040-master/notes_generator/layers/base_layers.py | import typing
import torch
from torch import nn
from notes_generator.constants import ConvStackType, NMELS
from notes_generator.layers.drop import DropBlock2d
class BiLSTM(nn.Module):
    """Bidirectional LSTM Stack

    Parameters
    ----------
    input_features : int
        The number of expected features in the input x
    recurrent_features : int
        The number of features in the hidden state h
    inference_chunk_length : int
        Chunk length used in eval mode so long sequences fit in memory.
    num_layers : int
        Number of recurrent layers. default: `1`
    dropout: float
        The Rate of Dropout
    """

    def __init__(
        self,
        input_features,
        recurrent_features,
        inference_chunk_length: int = 640,
        num_layers: int = 1,
        dropout: float = 0,
    ):
        super().__init__()
        self.inference_chunk_length = inference_chunk_length
        self.num_layers = num_layers
        self.rnn = nn.LSTM(
            input_features,
            recurrent_features,
            batch_first=True,
            bidirectional=True,
            num_layers=num_layers,
            dropout=dropout,
        )

    def forward(
        self, x: torch.Tensor, hc: typing.Optional[typing.Tuple[torch.Tensor, torch.Tensor]] = None
    ):
        """
        Parameters
        ----------
        x : torch.Tensor
            Tensor of shape (batch, seq_len, input_features)
            containing the features of input sequence.
        hc : typing.Optional[typing.Tuple[torch.Tensor, torch.Tensor]]
            Tuple of tensors (h_0, c_0).
                * h_0 is a tensor of shape (num_layers * 2, batch, recurrent_features)
                  containing the initial hidden state for each element in the batch.
                * c_0 is a tensor of shape (num_layers * 2, batch, recurrent_features)
                  containing the initial cell state for each element in the batch.
            If not provided, both hidden state and cell state are initialized with zero.
            default: `None`

        Returns
        -------
        output: torch.Tensor
            Tensor of shape (batch, seq_len, 2 * recurrent_features)
            containing output features (h_t) from the last layer of the LSTM,
            for each t.
        """
        if self.training:
            self.rnn.flatten_parameters()
            val = self.rnn(x, hc)[0]
            return val
        else:
            self.rnn.flatten_parameters()
            # evaluation mode: support for longer sequences that do not fit in memory
            batch_size, sequence_length, input_features = x.shape
            hidden_size = self.rnn.hidden_size
            num_directions = 2 if self.rnn.bidirectional else 1
            if hc:
                h, c = hc
            else:
                h = torch.zeros(
                    num_directions * self.num_layers, batch_size, hidden_size, device=x.device
                )
                c = torch.zeros(
                    num_directions * self.num_layers, batch_size, hidden_size, device=x.device
                )
            output = torch.zeros(
                batch_size, sequence_length, num_directions * hidden_size, device=x.device
            )

            # forward direction: run chunk by chunk, carrying (h, c) across chunks
            slices = range(0, sequence_length, self.inference_chunk_length)
            for start in slices:
                end = start + self.inference_chunk_length
                output[:, start:end, :], (h, c) = self.rnn(x[:, start:end, :], (h, c))

            # reverse direction: re-run the chunks back-to-front with fresh state
            # and keep only the backward half of each chunk's output.
            if self.rnn.bidirectional:
                h.zero_()
                c.zero_()
                for start in reversed(slices):
                    end = start + self.inference_chunk_length
                    result, (h, c) = self.rnn(x[:, start:end, :], (h, c))
                    output[:, start:end, hidden_size:] = result[:, :, hidden_size:]

            return output
def get_conv_stack(conv_stack_type: ConvStackType, input_features: int, output_features: int):
    """Factory returning the convolution stack for `conv_stack_type`.

    Parameters
    ----------
    conv_stack_type : ConvStackType
        Which stack variant to build (`v1` or `v7`).
    input_features : int
        Size of each input sample.
    output_features : int
        Size of each output sample.

    Returns
    -------
    nn.Module

    Raises
    ------
    ValueError
        If `conv_stack_type` is not a supported variant.
    """
    if conv_stack_type is ConvStackType.v1:
        return ConvStack(input_features, output_features)
    elif conv_stack_type is ConvStackType.v7:
        return ConvStackV7(input_features, output_features)
    else:
        # Name the unsupported value instead of raising a bare ValueError.
        raise ValueError(f"Unsupported conv_stack_type: {conv_stack_type!r}")
class ConvStack(nn.Module):
    """Convolution stack for piano transcription task

    Notes
    -----
    See link below for an original implementation:
    https://github.com/jongwook/onsets-and-frames/blob/master/onsets_and_frames/transcriber.py

    Parameters
    ----------
    input_features : int
        Size of each input sample.
    output_features : int
        Size of each output sample.
    """

    def __init__(self, input_features: int, output_features: int):
        super().__init__()
        # input is batch_size * 1 channel * frames * input_features
        self.cnn = nn.Sequential(
            # layer 0
            nn.Conv2d(1, output_features // 16, (3, 3), padding=1),
            nn.BatchNorm2d(output_features // 16),
            nn.ReLU(),
            # layer 1
            nn.Conv2d(output_features // 16, output_features // 16, (3, 3), padding=1),
            nn.BatchNorm2d(output_features // 16),
            nn.ReLU(),
            # layer 2 (pooling halves the frequency axis; time axis is untouched)
            nn.MaxPool2d((1, 2)),
            nn.Dropout(0.25),
            nn.Conv2d(output_features // 16, output_features // 8, (3, 3), padding=1),
            nn.BatchNorm2d(output_features // 8),
            nn.ReLU(),
            # layer 3
            nn.MaxPool2d((1, 2)),
            nn.Dropout(0.25),
        )
        # Frequency axis has been pooled down by 4, hence input_features // 4.
        self.fc = nn.Sequential(
            nn.Linear((output_features // 8) * (input_features // 4), output_features),
            nn.Dropout(0.5),
        )

    def forward(self, mel: torch.Tensor):
        """
        Parameters
        ----------
        mel : torch.Tensor
            Tensor of shape (batch_size, seq_len, input_features(frequency))
            containing the log-scaled melspectrogram audio data.

        Returns
        -------
        torch.Tensor
            Tensor of shape (batch_size, seq_len, output_features)
        """
        # Add the singleton channel dim expected by Conv2d.
        x = mel.view(mel.size(0), 1, mel.size(1), mel.size(2))
        x = self.cnn(x)
        # Merge channels with the (pooled) frequency axis before the linear layer.
        x = x.transpose(1, 2).flatten(-2)
        x = self.fc(x)
        return x
# The multi-scale conv-stack proposed in our paper
class ConvStackV7(nn.Module):
    """Multi-scale convolution stack.

    Four parallel CNN branches pool the input over increasingly long time
    spans (1x, 16x, 64x and 128x frames); each branch collapses the
    frequency axis to a single bin, the coarser branches are upsampled back
    to the input frame rate, and all branch outputs are concatenated along
    the channel axis.

    The four branches previously existed as four near-identical inline
    ``nn.Sequential`` definitions; they are now built by ``_make_branch``
    with the same layer order, so parameter initialization order and
    state_dict keys are unchanged.

    Parameters
    ----------
    input_features : int
        Size of each input sample.
    output_features : int
        Size of each output sample.
    dropout : float
        Drop probability used by the DropBlock2d layers inside each branch.
    dropout_last : float
        Drop probability of the final Dropout applied to the concatenated
        branch outputs.
    """

    def __init__(
        self,
        input_features: int,
        output_features: int,
        dropout: float = 0.25,
        dropout_last: float = 0.5,
    ):
        super().__init__()
        # input is batch_size * 1 channel * frames * input_features
        # (pool at layer 2, pool at layer 3, time upsample factor or None).
        # A `None` upsample means the branch already runs at the input frame rate.
        branch_specs = [
            ((1, 4), (1, 4), None),  # no pooling in time direction
            ((2, 4), (8, 4), 16),  # 16x max pooling in time direction (~0.5s)
            ((2, 4), (32, 4), 64),  # 64x max pooling in time direction (~2s)
            ((2, 4), (64, 4), 128),  # 128x max pooling in time direction (4s)
        ]
        self.cnns = nn.ModuleList(
            [
                self._make_branch(output_features, dropout, pool2, pool3, up)
                for pool2, pool3, up in branch_specs
            ]
        )
        self.dropout = nn.Dropout(dropout_last)

    @staticmethod
    def _make_branch(output_features, dropout, pool2, pool3, upsample):
        """Build one CNN branch; `upsample` restores the input time resolution."""
        layers = [
            # layer 0
            nn.Conv2d(1, output_features // 16, (3, 3), padding=1),
            nn.BatchNorm2d(output_features // 16),
            nn.ReLU(),
            # layer 1
            nn.Conv2d(output_features // 16, output_features // 16, (3, 3), padding=1),
            nn.BatchNorm2d(output_features // 16),
            nn.ReLU(),
            # layer 2
            nn.MaxPool2d(pool2),
            DropBlock2d(dropout, 5, 0.25),
            nn.Conv2d(output_features // 16, output_features // 8, (3, 3), padding=1),
            nn.BatchNorm2d(output_features // 8),
            nn.ReLU(),
            # layer 3
            nn.MaxPool2d(pool3),
            DropBlock2d(dropout, 3, 1.00),
            nn.Conv2d(output_features // 8, output_features // 4, (3, 3), padding=1),
            nn.BatchNorm2d(output_features // 4),
            nn.ReLU(),
            nn.MaxPool2d((1, 4)),
            # Collapse the remaining frequency bins to a single one.
            nn.AvgPool2d((1, NMELS // 64)),
        ]
        if upsample is not None:
            layers.append(nn.Upsample(scale_factor=(upsample, 1)))
        return nn.Sequential(*layers)

    def forward(self, mel: torch.Tensor):
        """
        Parameters
        ----------
        mel : torch.Tensor
            Tensor of shape (batch_size, seq_len, input_features(frequency))
            containing the log-scaled melspectrogram audio data.

        Returns
        -------
        torch.Tensor
            Tensor of shape (batch_size, seq_len, output_features)
        """
        # Zero-pad the time axis to a multiple of 128 so every branch's
        # pool/upsample round trip reproduces the padded input length.
        padding = 0
        if mel.shape[1] % 128 != 0:
            padding = 128 - mel.shape[1] % 128
            mel = torch.cat(
                [mel, torch.zeros((mel.shape[0], padding, mel.shape[-1])).to(mel.device)], dim=1
            )
        x = mel.view(mel.size(0), 1, mel.size(1), mel.size(2))
        xs = [branch(x) for branch in self.cnns]
        # Concatenate the four branches along the channel axis
        # (4 * output_features // 4 == output_features channels).
        x = torch.cat(xs, dim=1)
        if padding > 0:
            x = x[:, :, :-padding, :]
        x = self.dropout(x)
        # x: B, C, H, W
        x = x.transpose(1, 2).flatten(-2)
        return x
| 14,245 | 33.916667 | 99 | py |
AAAI-23.6040 | AAAI-23.6040-master/notes_generator/layers/transformer_layers.py | # https://github.com/novdov/music-transformer/blob/master/music_transformer/modules/attention.py
import math
from typing import List, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class MultiheadAttention(nn.Module):
    """Scaled dot-product attention computed over several heads in parallel.

    Parameters
    ----------
    num_heads : int
        Number of parallel attention heads.
    d_model : int
        Number of expected features in the encoder/decoder of the model.
    dropout : float
        Rate of Dropout layer after computing attention.
    """

    def __init__(
        self,
        num_heads: int,
        d_model: int,
        dropout: float,
    ):
        super().__init__()
        self.num_heads = num_heads
        self.d_model = d_model
        self.dropout = nn.Dropout(p=dropout)
        # Per-head feature size.
        self.depth = d_model // num_heads
        in_out = (self.d_model, self.d_model)
        self.query_projection = nn.Linear(*in_out)
        self.key_projection = nn.Linear(*in_out)
        self.value_projection = nn.Linear(*in_out)
        self.attention_projection = nn.Linear(*in_out)

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        mask: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Parameters
        ----------
        query : torch.Tensor
            Tensor of shape (batch, query_len, d_model).
        key : torch.Tensor
            Tensor of shape (batch, memory_len, d_model).
        value : torch.Tensor
            Tensor of shape (batch, memory_len, d_model).
        mask : Optional[torch.Tensor]
            Boolean-like mask; positions where the mask is zero are
            excluded from attention.

        Returns
        -------
        output : torch.Tensor
            Tensor of shape (batch, query_len, d_model)
        weights : torch.Tensor
            Attention weights of shape (batch, num_heads, query_len, memory_len)
        """
        q = self.split_heads(self.query_projection(query))
        k = self.split_heads(self.key_projection(key))
        v = self.split_heads(self.value_projection(value))
        scores = torch.matmul(q, k.transpose(-2, -1))
        scores = scores / math.sqrt(self.depth)
        if mask is not None:
            # -1e9 makes the masked logits vanish after the softmax.
            scores = scores.masked_fill(mask == 0, -1e9)
        attn = self.dropout(F.softmax(scores, dim=-1))
        context = torch.matmul(attn, v)
        # (B, H, L, D) -> (B, L, H * D)
        context = context.transpose(1, 2).contiguous().view(context.size(0), -1, self.d_model)
        return self.attention_projection(context), attn

    def split_heads(self, tensor: torch.Tensor) -> torch.Tensor:
        """Reshape (batch, seq_len, d_model) -> (batch, num_heads, seq_len, depth),
        where ``depth = d_model // num_heads``.
        """
        batch_size = tensor.size(0)
        return tensor.reshape(batch_size, -1, self.num_heads, self.depth).transpose(1, 2)
class RelativeGlobalAttention(MultiheadAttention):
    """Multi-head attention with relative position information.

    Adds relative-position logits (via the "skew" shift trick) to the
    content-based logits of the base class.

    Parameters
    ----------
    num_heads : int
        Number of parallel attention heads.
    max_relative_position : int
        Maximum relative distance represented by the relative embedding.
    d_model : int
        Number of expected features in the encoder/decoder of the model.
    dropout : float
        Rate of Dropout layer after computing attention.
    """

    def __init__(
        self,
        num_heads: int,
        max_relative_position: int,
        d_model: int,
        dropout: float,
    ):
        super().__init__(num_heads, d_model, dropout)
        self.max_relative_position = max_relative_position
        length = max(max_relative_position, self.depth)
        range_vec = torch.arange(length)
        # Clipped relative-distance matrix, shifted to be non-negative.
        relative_mat = torch.clamp(
            range_vec[None, :] - range_vec[:, None], -max_relative_position, +max_relative_position
        )
        relative_mat = relative_mat + max_relative_position
        # NOTE(review): stored as a plain tensor (not a registered buffer);
        # it is moved to the input device on every forward call instead.
        self.relative_embedding = relative_mat[:max_relative_position, : self.depth].float()

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        mask: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Apply relative global attention; see the base class for shapes."""
        queries = self.split_heads(self.query_projection(query))
        # BUG FIX: keys and values were previously projected with
        # query_projection, leaving key_projection and value_projection
        # unused (copy-paste error).
        keys = self.split_heads(self.key_projection(key))
        values = self.split_heads(self.value_projection(value))
        length_q = queries.shape[2]
        length_k = keys.shape[2]
        logits = torch.matmul(queries, keys.transpose(-2, -1))
        key_relative_embedding = self._get_relative_embedding_left(length_q).to(query.device)
        rel_logits = torch.einsum("bhld,md->bhlm", [queries, key_relative_embedding])
        rel_logits = self._skew(rel_logits, length_k)
        logits = (logits + rel_logits) / math.sqrt(self.depth)
        if mask is not None:
            logits = logits.masked_fill(mask == 0, -1e9)
        weights = self.dropout(F.softmax(logits, dim=-1))
        output = torch.matmul(weights, values)
        output = output.permute(0, 2, 1, 3).contiguous().view(output.size(0), -1, self.d_model)
        return self.attention_projection(output), weights

    def _get_relative_embedding_left(self, length: int) -> torch.Tensor:
        # Keep only the last `length` rows when the query is shorter than
        # the maximum relative distance.
        starting_point = max(0, self.max_relative_position - length)
        return self.relative_embedding[starting_point:, :]

    @staticmethod
    def _skew(rel_logits: torch.Tensor, length_key) -> torch.Tensor:
        """Shift relative logits into absolute-position alignment."""
        batch_size, num_heads, length_q, _ = rel_logits.size()
        assert rel_logits.shape[-2] == rel_logits.shape[-1]
        # (B, H, L, L) -> (B, H, L, 1 + L)
        rel_logits = F.pad(rel_logits, [1, 0, 0, 0])
        # (B, H, L, 1 + L) -> (B, H, 1 + L, L)
        rel_logits = rel_logits.reshape(batch_size, num_heads, 1 + length_q, length_q)
        # (B, H, 1 + L, L) -> (B, H, L, L)
        rel_logits = rel_logits[:, :, 1:, :]
        if length_key > length_q:  # M > L
            # (B, H, L, L) -> (B, H, L, M)
            rel_logits = F.pad(rel_logits, [0, length_key - length_q, 0, 0])
        elif length_key < length_q:
            # (B, H, L, L) -> (B, H, L, M)
            rel_logits = rel_logits[:, :, :, :length_key]
        return rel_logits
class RelLearnbaleAttention(MultiheadAttention):
    """Attention layer for TransformerXL model.

    Extends multi-head attention with a projected positional embedding and
    the learnable global content/position biases (``u``, ``v``), attending
    over a memory of cached previous hidden states.

    Parameters
    ----------
    num_heads : int
        Number of parallel attention heads.
    d_model : int
        Number of expected features in the encoder/decoder of the model.
    dropout : float
        Rate of Dropout layer after computing attention.
    """

    def __init__(
        self,
        num_heads: int,
        d_model: int,
        dropout: float,
    ):
        super().__init__(num_heads, d_model, dropout)
        projection_inout = (self.d_model, self.d_model)
        # NOTE(review): "proejction" is a typo, but renaming the attribute
        # would change state_dict keys, so it is kept as-is.
        self.pos_proejction = nn.Linear(*projection_inout)

    # noinspection PyMethodOverriding
    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        pos_: torch.Tensor,
        u: nn.Parameter,
        v: nn.Parameter,
        mem: torch.Tensor,
        mask: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Parameters
        ----------
        query : torch.Tensor
            Tensor of shape (batch, query_len, d_model).
        key : torch.Tensor
            Tensor of shape (batch, memory_len, d_model).
        value : torch.Tensor
            Tensor of shape (batch, memory_len, d_model).
        pos_ : torch.Tensor
            Tensor of shape (memory_len + query_len, d_model)
            containing positional embedding.
        u : nn.Parameter
            Learnable parameter of shape (n_head, d_model // n_head)
            containing learnable global content bias.
        v : nn.Parameter
            Learnable parameter of shape (n_head, d_model // n_head)
            containing learnable global positional bias.
        mem : torch.Tensor
            Tensor of shape (batch, mem_len, d_model)
            containing memory from previous sentence.
        mask : Optional[torch.Tensor]
            Mask tensor of shape (query_len, memory_len + query_len)
            containing boolean-like values.
            Attention is prevented for positions where the corresponding
            value of the mask tensor is zero.

        Returns
        -------
        output : torch.Tensor
            Tensor of shape (batch, query_len, d_model)
        weights : torch.Tensor
            Tensor of shape (batch, n_head, query_len, memory_len)
        """
        batch = query.size(0)
        # query: (B, L, E)
        # key, value: (B, M, E)
        # Prepend the cached segment so keys/values span memory + current input.
        if mem.size(1) > 0:
            key = torch.cat((mem, key), dim=1)
            value = torch.cat((mem, value), dim=1)
        # (B, L, H, D)
        queries = self.query_projection(query).view(batch, -1, self.num_heads, self.depth)
        # (B, M, H, D)
        keys = self.key_projection(key).view(batch, -1, self.num_heads, self.depth)
        values = self.value_projection(value).view(batch, -1, self.num_heads, self.depth)
        # (M, H, D)
        pos_ = self.pos_proejction(pos_).view(-1, self.num_heads, self.depth)
        # term (a) (c)
        content_attention = torch.einsum("blhd,bmhd->bhlm", [(queries + u), keys])
        # term (b) (d)
        pos_attention = torch.einsum("blhd,mhd->bhlm", [(queries + v), pos_])
        pos_attention = self._skew(pos_attention)
        logits = content_attention + pos_attention
        logits = logits / math.sqrt(self.depth)
        if mask is not None:
            logits = logits.masked_fill(mask == 0, -1e9)
        weights = self.dropout(F.softmax(logits, dim=-1))
        # (B, H, L, M), (B, H, M, D) -> (B, L, H, D)
        output = torch.einsum("bhlm,bmhd->blhd", [weights, values])
        # (B, L, H x D)
        output = output.contiguous().view(output.size(0), -1, self.d_model)
        return self.attention_projection(output), weights

    @staticmethod
    def _skew(rel_logits: torch.Tensor) -> torch.Tensor:
        # Relative-shift trick: pad one column then reshape so each query
        # row is shifted into alignment with its relative positions.
        batch_size, num_heads, length_q, length_m = rel_logits.size()
        # (B, H, L, M) -> (B, H, L, 1 + M)
        rel_logits = F.pad(rel_logits, [1, 0, 0, 0])
        # (B, H, L, 1 + M) -> (B, H, 1 + M, L)
        rel_logits = rel_logits.view(batch_size, num_heads, 1 + length_m, length_q)
        # (B, H, 1 + M, L) -> (B, H, L, M)
        rel_logits = rel_logits[:, :, 1:, :].view(batch_size, num_heads, length_q, length_m)
        return rel_logits
class LocalRNN(nn.Module):
    """Run a GRU over a sliding window of ``ksize`` frames at every position,
    returning the final hidden state of each window.
    """

    def __init__(self, output_dim, ksize):
        super(LocalRNN, self).__init__()
        self.ksize = ksize
        self.rnn = nn.GRU(output_dim, output_dim, batch_first=True)
        self.output = nn.Sequential(nn.Linear(output_dim, output_dim), nn.ReLU())
        # Precompute gather indices: for each window end j (into the
        # left-padded sequence), the ksize positions j-(ksize-1) .. j.
        window_idx = [
            pos
            for end in range(self.ksize - 1, 10000, 1)
            for pos in range(end - (self.ksize - 1), end + 1, 1)
        ]
        self.select_index = torch.LongTensor(window_idx)

    def forward(self, x):
        self.rnn.flatten_parameters()
        windows = self.get_k(x)  # b x seq_len x ksize x d_model
        batch, seq_len, ksize, d_model = windows.shape
        flat = windows.view(-1, self.ksize, d_model)
        # Final GRU output of every window.
        last_state = self.rnn(flat)[0][:, -1, :]
        return last_state.view(batch, seq_len, d_model)

    def get_k(self, x):
        """Gather the ksize-frame window ending at each position of x."""
        batch_size, seq_len, d_model = x.shape
        padded = F.pad(x, [0, 0, self.ksize - 1, 0])
        idx = self.select_index[: self.ksize * seq_len].to(x.device)
        windows = torch.index_select(padded, 1, idx)
        return windows.reshape(batch_size, seq_len, self.ksize, -1)
class LocalRNNLayer(nn.Module):
    """Residual LocalRNN block: ``LayerNorm(x + Dropout(LocalRNN(x)))``."""

    def __init__(self, output_dim, ksize, dropout):
        super(LocalRNNLayer, self).__init__()
        self.local_rnn = LocalRNN(output_dim, ksize)
        self.norm = nn.LayerNorm(output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        residual = x + self.dropout(self.local_rnn(x))
        return self.norm(residual)
# https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/transformer.py
class TransformerEncoderLayer(nn.Module):
    """Transformer encoder layer with an optional R-Transformer local RNN.

    Self-attention followed by a position-wise feed-forward network, each
    wrapped with dropout, a residual connection and layer normalization.
    """

    def __init__(
        self,
        d_model: int,
        nhead: int,
        dim_feedforward: int = 2048,
        dropout: float = 0.1,
        is_r_transformer: bool = False,
    ):
        super(TransformerEncoderLayer, self).__init__()
        self.ffn = nn.Sequential(
            nn.Linear(d_model, dim_feedforward),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout),
            nn.Linear(dim_feedforward, d_model),
        )
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.d_model = d_model
        self.nhead = nhead
        self.dropout = dropout
        # build_attention is a hook overridden by subclasses to swap in
        # relative / TransformerXL attention variants.
        self.self_attn = self.build_attention()
        self.is_r_transformer = is_r_transformer
        if is_r_transformer:
            self.local_rnn = LocalRNNLayer(d_model, ksize=7, dropout=0.1)

    def build_attention(self):
        return MultiheadAttention(self.nhead, self.d_model, self.dropout)

    def forward(self, src, src_mask=None, src_key_padding_mask=None):
        combined_mask = merge_mask(src_mask, src_key_padding_mask)
        if self.is_r_transformer:
            src = self.local_rnn(src)
        attn_out = self.self_attn(src, src, src, mask=combined_mask)[0]
        src = self.norm1(src + self.dropout1(attn_out))
        ffn_out = self.ffn(src)
        src = self.norm2(src + self.dropout2(ffn_out))
        return src
class RGATransformerEncoderLayer(TransformerEncoderLayer):
    """Encoder layer whose self-attention is ``RelativeGlobalAttention``.

    Parameters match ``TransformerEncoderLayer`` plus
    ``max_relative_position`` forwarded to the relative attention.
    """

    def __init__(
        self,
        d_model: int,
        nhead: int,
        max_relative_position: int,
        dim_feedforward: int = 2048,
        dropout: float = 0.1,
    ):
        # Must be assigned BEFORE super().__init__(), which calls
        # build_attention() and therefore reads this attribute.
        self.max_relative_position = max_relative_position
        super(RGATransformerEncoderLayer, self).__init__(d_model, nhead, dim_feedforward, dropout)

    def build_attention(self):
        # Hook override: use relative global attention for self-attention.
        return RelativeGlobalAttention(
            self.nhead, self.max_relative_position, self.d_model, self.dropout
        )
class TransformerXLEncoderLayer(TransformerEncoderLayer):
    """Encoder layer using TransformerXL-style relative learnable attention.

    ``forward`` additionally threads the positional embedding, the global
    biases (u, v) and the cached memory through the attention module.
    """

    def build_attention(self):
        return RelLearnbaleAttention(self.nhead, self.d_model, self.dropout)

    def forward(self, src, pos, u, v, mem, src_mask=None, src_key_padding_mask=None):
        combined_mask = merge_mask(src_mask, src_key_padding_mask)
        if self.is_r_transformer:
            src = self.local_rnn(src)
        attn_out = self.self_attn(src, src, src, pos, u, v, mem, mask=combined_mask)[0]
        src = self.norm1(src + self.dropout1(attn_out))
        ffn_out = self.ffn(src)
        src = self.norm2(src + self.dropout2(ffn_out))
        return src
class TransformerDecoderLayer(nn.Module):
    """Transformer decoder layer.

    Masked self-attention, encoder-decoder attention and a position-wise
    feed-forward network, each followed by dropout, a residual connection
    and layer normalization.
    """

    def __init__(
        self,
        d_model: int,
        nhead: int,
        dim_feedforward: int = 2048,
        dropout: float = 0.1,
    ):
        super(TransformerDecoderLayer, self).__init__()
        # Position-wise feed-forward network.
        self.ffn = nn.Sequential(
            nn.Linear(d_model, dim_feedforward),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout),
            nn.Linear(dim_feedforward, d_model),
        )
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)
        self.d_model = d_model
        self.nhead = nhead
        self.dropout = dropout
        # Hook overridden by subclasses to swap attention implementations.
        self.self_attn, self.multihead_attn = self.build_attention()

    def build_attention(self):
        self_attn = MultiheadAttention(self.nhead, self.d_model, dropout=self.dropout)
        multihead_attn = MultiheadAttention(self.nhead, self.d_model, dropout=self.dropout)
        return self_attn, multihead_attn

    def forward(
        self,
        tgt,
        memory,
        tgt_mask=None,
        memory_mask=None,
        tgt_key_padding_mask=None,
        memory_key_padding_mask=None,
    ):
        # NOTE(review): memory_mask and memory_key_padding_mask are accepted
        # for API compatibility but are not applied to the cross-attention.
        combined_mask = merge_mask(tgt_mask, tgt_key_padding_mask)
        self_attn_out = self.self_attn(tgt, tgt, tgt, mask=combined_mask)[0]
        tgt = self.norm1(tgt + self.dropout1(self_attn_out))
        cross_attn_out = self.multihead_attn(tgt, memory, memory)[0]
        tgt = self.norm2(tgt + self.dropout2(cross_attn_out))
        ffn_out = self.ffn(tgt)
        tgt = self.norm3(tgt + self.dropout3(ffn_out))
        return tgt
class RGATransformerDecoderLayer(TransformerDecoderLayer):
    """Decoder layer whose attention modules use relative global attention.

    Parameters match ``TransformerDecoderLayer`` plus
    ``max_relative_position`` forwarded to both attention modules.
    """

    def __init__(
        self,
        d_model: int,
        nhead: int,
        max_relative_position: int,
        dim_feedforward: int = 2048,
        dropout: float = 0.1,
    ):
        # Must be assigned BEFORE super().__init__(), which calls
        # build_attention() and therefore reads this attribute.
        self.max_relative_position = max_relative_position
        super(RGATransformerDecoderLayer, self).__init__(d_model, nhead, dim_feedforward, dropout)

    def build_attention(self):
        self_attn = RelativeGlobalAttention(
            # BUG FIX: was `self.self.d_model`, which raised AttributeError
            # as soon as the layer was constructed.
            self.nhead, self.max_relative_position, self.d_model, dropout=self.dropout
        )
        multihead_attn = RelativeGlobalAttention(
            self.nhead, self.max_relative_position, self.d_model, dropout=self.dropout
        )
        return self_attn, multihead_attn
class TransformerXL(nn.Module):
    """A TransformerXL model.

    Features below are introduced to vanilla Transformer:

    * Recurrent mechanism
    * Relative positional encoding

    Parameters
    ----------
    d_model : int
        Number of expected features in the encoder/decoder of the model.
    nhead : int
        Number of parallel attention heads.
    num_layers : int
        Number of sub-encoder-layers in the encoder. default: `6`
    dim_feedforward : int
        Size of hidden layer of feed forward network in encoder.
        default: `2048`
    max_mem_length : int
        Maximum length of memory that attention is applied to.
        default: `100`
    dropout : float
        Rate of Dropout layer after computing attention.
        default: `0.1`
    has_mask : bool
        If `True`, source mask will be applied so as to prevent an attention
        to future information.
        default: `True`
    is_r_transformer : bool
        If `True`, insert Local RNN structure before computing attention
        in each encoder layers.
        default: `False`
    """

    def __init__(
        self,
        d_model: int = 512,
        nhead: int = 8,
        num_layers: int = 6,
        dim_feedforward: int = 2048,
        max_mem_length: int = 100,
        dropout: float = 0.1,
        has_mask: bool = True,
        is_r_transformer: bool = False,
    ):
        assert d_model % nhead == 0, f"d_model: {d_model}, nhead: {nhead}"
        super(TransformerXL, self).__init__()
        self.encoder_layers = nn.ModuleList(
            [
                TransformerXLEncoderLayer(
                    d_model, nhead, dim_feedforward, dropout, is_r_transformer
                )
                for _ in range(num_layers)
            ]
        )
        # NOTE(review): encoder_norm is created but not applied in forward().
        self.encoder_norm = nn.LayerNorm(d_model)
        self.d_model = d_model
        self.nhead = nhead
        self.n_layers = num_layers
        self.max_mem_length = max_mem_length
        self.has_mask = has_mask
        # content bias
        self.u = nn.Parameter(torch.zeros(nhead, d_model // nhead), requires_grad=True)
        nn.init.xavier_normal_(self.u)
        # pos bias
        self.v = nn.Parameter(torch.zeros(nhead, d_model // nhead), requires_grad=True)
        nn.init.xavier_normal_(self.v)
        self.pos_enb = PositionalEncoding(d_model, apply_positional_encoding="add")

    def create_mask(self, q_len: int, m_len: int) -> torch.Tensor:
        """Create an attention mask tensor

        Parameters
        ----------
        q_len : int
            Size of query sequence.
        m_len : int
            Size of memory sequence.

        Returns
        -------
        torch.Tensor
            Tensor of shape (q_len, q_len + m_len) containing boolean values.
            `True` marks positions that MAY be attended to; the attention
            layer blocks positions where the mask is zero
            (``masked_fill(mask == 0, -1e9)``), i.e. the strictly-future
            positions beyond each query index.
        """
        return torch.triu(torch.ones(q_len, q_len + m_len), diagonal=m_len + 1) == 0

    def init_mem(self, batch_size: int, device: str) -> List[torch.Tensor]:
        """Initialize memory sequence.

        Parameters
        ----------
        batch_size : int
            Size of minibatch.
        device : str
            The desired device in which the computation is performed.
            choices: [`cpu`, `cuda`].

        Returns
        -------
        List[torch.Tensor]
            List of length `num_layers` containing initial memory tensors
            whose shapes are (batch, 0, d_model).
        """
        # Grab any parameter just to match its dtype.
        param = next(self.parameters())
        return [
            torch.zeros(batch_size, 0, self.d_model, dtype=param.dtype, device=device)
            for _ in range(self.n_layers)
        ]

    def _update_mems(
        self, memory: List[torch.Tensor], outputs: List[torch.Tensor], mlen: int, qlen: int
    ) -> List[torch.Tensor]:
        """Update memory with new output

        Parameters
        ----------
        memory : List[torch.Tensor]
            List of length `num_layers` containing tensors of shape
            (batch, mlen, d_model) with the previous memory.
        outputs : List[torch.Tensor]
            List of length `num_layers` containing the per-layer inputs of
            the current segment, each of shape (batch, qlen, d_model).
        mlen : int
            Length of memory.
        qlen : int
            Length of query.

        Returns
        -------
        new_mems : List[torch.Tensor]
            List of length `num_layers` containing tensors of shape
            (batch, mem_length, d_model) with the updated memory, where
            mem_length is calculated as below:
            `mem_length = min(mlen + qlen, max_mem_length)`
        """
        with torch.no_grad():
            new_mems = []
            end_idx = mlen + qlen
            # Keep only the most recent max_mem_length steps.
            start_idx = max(0, end_idx - self.max_mem_length)
            for mem, output in zip(memory, outputs):
                cat = torch.cat([mem, output], dim=1)
                new_mems.append(cat[:, start_idx:end_idx].detach())
        return new_mems

    def forward(self, tgt: torch.Tensor, memory: List[torch.Tensor] = None):
        """
        Parameters
        ----------
        tgt : torch.Tensor
            Tensor of shape (batch, query_len, d_model)
        memory : List[torch.Tensor]
            List of length `num_layers` containing tensors of shape
            (batch, mem_length, d_model) with the cached memory, where
            mem_length is calculated as below:
            `mem_length = min(mlen + qlen, max_mem_length)`

        Returns
        -------
        output : torch.Tensor
            Tensor of shape (batch, query_len, d_model)
        new_mems : List[torch.Tensor]
            List of length `num_layers` containing tensors of shape
            (batch, mem_length, d_model) with the updated memory, where
            mem_length is calculated as below:
            `mem_length = min(mlen + qlen, max_mem_length)`
        """
        if memory is None:
            memory = self.init_mem(tgt.size(0), tgt.device)
        mlen = memory[0].size(1)
        qlen = tgt.size(1)
        # Sinusoidal positional embedding covering memory + current segment.
        pos = torch.zeros(1, mlen + qlen, self.d_model).to(tgt.device)
        pos = self.pos_enb(pos).squeeze(0) / math.sqrt(self.d_model)
        output = tgt
        mask = None
        if self.has_mask:
            mask = self.create_mask(qlen, mlen).to(tgt.device)
        new_mems = []
        for mem, layer in zip(memory, self.encoder_layers):
            # Cache each layer's INPUT; it becomes that layer's memory for
            # the next segment.
            new_mems.append(output.detach())
            output = layer(output, pos, self.u, self.v, mem, mask)
        new_mems = self._update_mems(memory, new_mems, mlen, qlen)
        assert len(new_mems) == self.n_layers
        return output, new_mems
class ScaledEmbedding(nn.Module):
    """Token embedding whose output is scaled by ``sqrt(d_model)``.

    Weights are initialized with Xavier normal initialization.
    """

    def __init__(self, d_model: int, vocab_size: int):
        super().__init__()
        self.d_model = d_model
        self.embedding = nn.Embedding(vocab_size, d_model)
        nn.init.xavier_normal_(self.embedding.weight)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        scale = math.sqrt(self.d_model)
        return self.embedding(x) * scale
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding, either added to or concatenated with x.

    ``apply_positional_encoding`` selects the combination mode: ``"add"``
    sums the encoding with the input (output keeps d_model features),
    ``"concat"`` appends it along the feature axis (output has
    2 * d_model features).
    """

    POSSIBLE_METHODS = ("add", "concat")

    def __init__(
        self,
        d_model: int,
        max_len: int = 5000,
        dropout: float = 0.1,
        apply_positional_encoding: str = "concat",
    ):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        self.apply_positional_encoding = apply_positional_encoding
        # Precompute the (1, max_len, d_model) sinusoid table once.
        table = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
        table[:, 0::2] = torch.sin(position * div_term)
        table[:, 1::2] = torch.cos(position * div_term)
        self.positional_encoding = table.unsqueeze(0)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.apply_positional_encoding not in self.POSSIBLE_METHODS:
            raise ValueError(
                f"{self.apply_positional_encoding} should be one of {self.POSSIBLE_METHODS}s"
            )
        # The table is a plain attribute (not a registered buffer), so it
        # must be moved to the input device by hand.
        self.positional_encoding = self.positional_encoding.to(x.device)
        seq_len = x.size(1)
        if self.apply_positional_encoding == "add":
            out = x + self.positional_encoding[:, :seq_len]
        else:
            batch_size = x.size(0)
            tiled = self.positional_encoding[:, :seq_len].repeat(batch_size, 1, 1)
            out = torch.cat([x, tiled], dim=-1)
        return self.dropout(out)
def create_subsequent_mask(size: int) -> torch.Tensor:
    """Mask out subsequent positions.

    Returns
    -------
    torch.Tensor
        Boolean (size, size) tensor whose lower triangle (diagonal
        included) is True: position i may attend to positions <= i.
    """
    future = torch.triu(torch.ones(size, size), diagonal=1)
    return future == 0
def merge_mask(mask, key_padding_mask):
    """Combine an attention mask with a key-padding mask.

    Parameters
    ----------
    mask : Optional[torch.Tensor]
        Boolean-like attention mask (non-zero = attend), or None.
    key_padding_mask : Optional[torch.Tensor]
        Boolean mask of shape (batch, seq_len) where True marks padded
        positions, or None.

    Returns
    -------
    Optional[torch.Tensor]
        Broadcastable combined mask (non-zero = attend), or None when both
        inputs are None.
    """
    if key_padding_mask is None:
        return mask
    # Invert so that True means "may attend", and add head/query axes.
    inverted = ~key_padding_mask[:, None, None, :]
    if mask is None:
        # Robustness fix: this combination previously raised a TypeError
        # (`Tensor & None`).
        return inverted
    return inverted & mask
| 26,512 | 34.925474 | 99 | py |
AAAI-23.6040 | AAAI-23.6040-master/notes_generator/layers/attention.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
    """Single-head dot-product attention with learned projections.

    Parameters
    ----------
    d_model : int
        Feature size of queries, keys and values.
    dropout : float
        Dropout rate applied to the attention weights. default: `0.1`
    """

    def __init__(self, d_model: int, dropout: float = 0.1):
        super(Attention, self).__init__()
        self.d_model = d_model
        projection_inout = (self.d_model, self.d_model)
        self.query_projection = nn.Linear(*projection_inout)
        self.key_projection = nn.Linear(*projection_inout)
        self.value_projection = nn.Linear(*projection_inout)
        self.attention_projection = nn.Linear(*projection_inout)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, query, key, value):
        """
        Parameters
        ----------
        query (batch, q_length, model_size)
        key (batch, key_length, model_size)
        value (batch, key_length, model_size)

        Returns
        -------
        (batch, q_length, model_size)
        """
        # query: (batch, q_length, dim)
        # key: (batch, key_length, dim)
        # value: (batch, key_length, dim)
        # BUG FIX: keys and values were previously run through
        # query_projection, leaving key_projection / value_projection unused.
        queries = self.query_projection(query)
        keys = self.key_projection(key)
        values = self.value_projection(value)
        # NOTE(review): scores are not scaled by sqrt(d_model); kept as-is
        # to preserve existing behavior.
        score = torch.matmul(queries, keys.transpose(-2, -1))
        # score: (batch, q_length, key_length)
        weights = self.dropout(F.softmax(score, dim=-1))
        output = torch.matmul(weights, values)
        # output: (batch, q_length, dim)
        output = self.attention_projection(output)
        return output, weights
| 1,484 | 32.75 | 64 | py |
AAAI-23.6040 | AAAI-23.6040-master/notes_generator/layers/drop.py | # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py
""" DropBlock, DropPath
PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers.
Papers:
DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890)
Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382)
Code:
DropBlock impl inspired by two Tensorflow impl that I liked:
- https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74
- https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
def drop_block_2d(
    x,
    drop_prob: float = 0.1,
    block_size: int = 7,
    gamma_scale: float = 1.0,
    with_noise: bool = False,
    inplace: bool = False,
    batchwise: bool = False,
):
    """DropBlock. See https://arxiv.org/pdf/1810.12890.pdf

    DropBlock with an experimental gaussian noise option. This layer has been tested on a few training
    runs with success, but needs further validation and possibly optimization for lower runtime impact.

    Parameters
    ----------
    x : torch.Tensor
        Input feature map of shape (B, C, H, W).
    drop_prob : float
        Target fraction of units to drop.
    block_size : int
        Side length of each square dropped block (clipped to min(H, W)).
    gamma_scale : float
        Multiplier applied to the computed seed rate gamma.
    with_noise : bool
        If True, fill dropped blocks with gaussian noise instead of zero.
    inplace : bool
        If True, mutate `x` in place.
    batchwise : bool
        If True, share one drop mask across the whole batch (faster).
    """
    B, C, H, W = x.shape
    total_size = W * H
    clipped_block_size = min(block_size, min(W, H))
    # seed_drop_rate, the gamma parameter
    gamma = (
        gamma_scale
        * drop_prob
        * total_size
        / clipped_block_size**2
        / ((W - block_size + 1) * (H - block_size + 1))
    )
    # Forces the block to be inside the feature map.
    w_i, h_i = torch.meshgrid(torch.arange(W).to(x.device), torch.arange(H).to(x.device))
    valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & (
        (h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2)
    )
    valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype)
    if batchwise:
        # one mask for whole batch, quite a bit faster
        uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device)
    else:
        uniform_noise = torch.rand_like(x)
    # block_mask is 1 where activations are kept; seeds drop where the
    # uniform noise falls below gamma inside the valid region.
    block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype)
    # Double negation turns max-pooling into a min over the keep-mask,
    # expanding every seed point to a clipped_block_size square.
    block_mask = -F.max_pool2d(
        -block_mask,
        kernel_size=clipped_block_size,  # block_size,
        stride=1,
        padding=clipped_block_size // 2,
    )
    if with_noise:
        normal_noise = (
            torch.randn((1, C, H, W), dtype=x.dtype, device=x.device)
            if batchwise
            else torch.randn_like(x)
        )
        if inplace:
            x.mul_(block_mask).add_(normal_noise * (1 - block_mask))
        else:
            x = x * block_mask + normal_noise * (1 - block_mask)
    else:
        # Rescale the kept activations so the expected activation sum is
        # preserved (the 1e-7 guards against an all-dropped mask).
        normalize_scale = (
            block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)
        ).to(x.dtype)
        if inplace:
            x.mul_(block_mask * normalize_scale)
        else:
            x = x * block_mask * normalize_scale
    return x
def drop_block_fast_2d(
    x: torch.Tensor,
    drop_prob: float = 0.1,
    block_size: int = 7,
    gamma_scale: float = 1.0,
    with_noise: bool = False,
    inplace: bool = False,
    batchwise: bool = False,
):
    """DropBlock. See https://arxiv.org/pdf/1810.12890.pdf

    DropBlock with an experimental gaussian noise option. Simplified from above without concern for valid
    block mask at edges.

    Parameters
    ----------
    x : torch.Tensor
        Input feature map of shape (B, C, H, W).
    drop_prob : float
        Target fraction of units to drop.
    block_size : int
        Side length of each square dropped block (clipped to min(H, W)).
    gamma_scale : float
        Multiplier applied to the computed seed rate gamma.
    with_noise : bool
        If True, fill dropped blocks with gaussian noise instead of zero.
    inplace : bool
        If True, mutate `x` in place.
    batchwise : bool
        If True, share one drop mask across the whole batch (faster).
    """
    B, C, H, W = x.shape
    total_size = W * H
    clipped_block_size = min(block_size, min(W, H))
    # Seed rate gamma: probability that a unit seeds a dropped block.
    gamma = (
        gamma_scale
        * drop_prob
        * total_size
        / clipped_block_size**2
        / ((W - block_size + 1) * (H - block_size + 1))
    )
    if batchwise:
        # one mask for whole batch, quite a bit faster
        block_mask = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device) < gamma
    else:
        # mask per batch element
        block_mask = torch.rand_like(x) < gamma
    # Expand each seed into a clipped_block_size square (1 = dropped here).
    block_mask = F.max_pool2d(
        block_mask.to(x.dtype),
        kernel_size=clipped_block_size,
        stride=1,
        padding=clipped_block_size // 2,
    )
    if with_noise:
        normal_noise = (
            torch.randn((1, C, H, W), dtype=x.dtype, device=x.device)
            if batchwise
            else torch.randn_like(x)
        )
        if inplace:
            x.mul_(1.0 - block_mask).add_(normal_noise * block_mask)
        else:
            x = x * (1.0 - block_mask) + normal_noise * block_mask
    else:
        # Invert so 1 = kept, then rescale kept activations to preserve
        # the expected activation sum (1e-7 guards against all-dropped).
        block_mask = 1 - block_mask
        normalize_scale = (
            block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)
        ).to(dtype=x.dtype)
        if inplace:
            x.mul_(block_mask * normalize_scale)
        else:
            x = x * block_mask * normalize_scale
    return x
class DropBlock2d(nn.Module):
    """DropBlock. See https://arxiv.org/pdf/1810.12890.pdf

    Module wrapper dispatching to `drop_block_fast_2d` or `drop_block_2d`;
    it is a no-op outside training mode or when `drop_prob` is falsy.
    """

    def __init__(
        self,
        drop_prob=0.1,
        block_size=7,
        gamma_scale=1.0,
        with_noise=False,
        inplace=False,
        batchwise=False,
        fast=True,
    ):
        super(DropBlock2d, self).__init__()
        self.drop_prob = drop_prob
        self.gamma_scale = gamma_scale
        self.block_size = block_size
        self.with_noise = with_noise
        self.inplace = inplace
        self.batchwise = batchwise
        self.fast = fast  # FIXME finish comparisons of fast vs not

    def forward(self, x):
        # Identity outside training or when dropping is disabled.
        if not self.training or not self.drop_prob:
            return x
        impl = drop_block_fast_2d if self.fast else drop_block_2d
        return impl(
            x,
            self.drop_prob,
            self.block_size,
            self.gamma_scale,
            self.with_noise,
            self.inplace,
            self.batchwise,
        )
def drop_path(x, drop_prob: float = 0.0, training: bool = False):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
    the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
    changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
    'survival rate' as the argument.

    Fix: a falsy ``drop_prob`` (0.0 or None) is treated as a no-op; previously
    ``None`` (DropPath's historical default) raised a TypeError in training.
    """
    if not drop_prob or not training:
        return x
    keep_prob = 1 - drop_prob
    # One random value per sample, broadcast across all remaining dims
    # (works with diff dim tensors, not just 2D ConvNets).
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
    random_tensor.floor_()  # binarize: 1 with prob keep_prob, else 0
    # Scale kept samples by 1/keep_prob so the expected value is unchanged.
    output = x.div(keep_prob) * random_tensor
    return output
class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob=0.0):
        # Default to 0.0 (identity) rather than None: drop_path() computes
        # ``1 - drop_prob`` and a None default raised a TypeError the first
        # time forward() ran in training mode.
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)
| 7,452 | 33.345622 | 108 | py |
AAAI-23.6040 | AAAI-23.6040-master/notes_generator/prediction/predictor.py | import logging
from logging import getLogger
from typing import Callable, Dict, List, Optional
from typing import Tuple, Union
import numpy as np
import torch
from notes_generator.constants import (
FRAME,
MAX_THRESHOLD,
SMDifficultyType,
SMNotesType,
sm_init_threshold,
sm_max_notes,
sm_min_note_distance,
)
from notes_generator.prediction.onset_prediction import predict as onset_predict, predict_proba
logger = getLogger(__name__)
logger.setLevel(level=logging.INFO)
class Predictor:
    """Abstract interface for note-sequence predictors."""

    def predict_all(self, mel: np.array, bpm_info: Optional[List] = None):
        """Predict note sequences for every supported difficulty."""
        raise NotImplementedError

    def predict(self, mel: np.array, condition: int):
        """Predict a note sequence for a single difficulty (``condition``)."""
        raise NotImplementedError

    @property
    def difficulties(self):
        """Iterable of difficulty identifiers this predictor supports."""
        raise NotImplementedError
class PredictorDDC(Predictor):
    """Two-stage DDC-style predictor.

    Stage 1: ``onset_model`` yields per-frame onset probabilities, which are
    binarized, de-duplicated and (optionally) threshold-adapted into timings.
    Stage 2: ``sym_model`` assigns a note type to each timing.
    Subclasses provide the per-difficulty thresholds and constants.
    """
    def __init__(self, onset_model: torch.nn.Module, sym_model: torch.nn.Module, device):
        self.onset_model = onset_model
        self.sym_model = sym_model
        self.device = device
    @property
    def min_note_distance(self):
        # Per-difficulty minimum spacing (in frames) between consecutive notes.
        raise NotImplementedError
    @property
    def max_threshold(self):
        # Upper bound for the adaptive onset-threshold search.
        raise NotImplementedError
    @property
    def init_threshold(self):
        # Per-difficulty starting onset threshold.
        raise NotImplementedError
    @property
    def sym_predict(self) -> Callable:
        # Callable implementing the symbol (note-type) prediction step.
        raise NotImplementedError
    def predict_onsets_proba(
        self,
        mel: np.array,
        condition: int,
        bpm_info: Optional[List[Tuple[Union[float, int], int, int]]] = None,
    ):
        """Return raw per-frame onset probabilities for ``mel``."""
        return predict_proba(self.onset_model, mel, condition, bpm_info, self.device)
    def predict_onsets(
        self,
        mel: np.array,
        condition: int,
        threshold: Optional[float] = None,
        bpm_info: Optional[List[Tuple[Union[float, int], int, int]]] = None,
    ):
        """Predict note timings (frame indices) from a mel spectrogram.

        When ``threshold`` is None, the per-difficulty default threshold is
        used first and then adapted via ``_adjust_threshold``.
        """
        # predict notes timings
        if threshold is None:
            sequence, probs = onset_predict(
                self.onset_model,
                mel,
                condition,
                threshold=self.init_threshold[condition],
                bpm_info=bpm_info,
                device=self.device,
            )
        else:
            sequence, probs = onset_predict(
                self.onset_model,
                mel,
                condition,
                device=self.device,
                threshold=threshold,
                bpm_info=bpm_info,
            )
        logger.info("predict onset complete")
        # Frame indices of the positive (onset) frames.
        sequence = sequence.nonzero()[0]
        # Drop the weaker of any two onsets closer than the minimum distance.
        sequence = filter_overwrap(sequence, probs, self.min_note_distance[condition])
        if threshold is None:
            songlen = mel.shape[0] * FRAME  # ms
            sequence = self._adjust_threshold(sequence, probs, condition, songlen, bpm_info)
        return sequence, probs
    def _adjust_threshold(
        self,
        sequence: np.ndarray,
        probs: np.ndarray,
        condition: int,
        songlen: int,
        bpm_info: List,
    ) -> np.ndarray:
        # Subclass hook: raise/lower the threshold so the note count fits.
        raise NotImplementedError
    def predict_sym(self, sequence, condition, bpm_info=None):
        """Assign a note type to each predicted timing via ``sym_predict``."""
        notes_sequence, sym_probs = self.sym_predict(
            self.sym_model, sequence, condition, self.device
        )
        logger.info("predict symbol complete")
        # timing(ms)、note type(left and right lane)
        # [[ 2144    6    4]
        #  [ 4896    0    1]
        #  ...
        notes_sequence = filter_notes(notes_sequence, sym_probs)
        return notes_sequence
    def predict(
        self,
        mel: np.array,
        condition: int,
        threshold: Optional[float] = None,
        bpm_info: Optional[List[Tuple[Union[float, int], int, int]]] = None,
    ):
        """Predict from the beginning"""
        # predict notes timing
        sequence, probs = self.predict_onsets(mel, condition, threshold, bpm_info)
        # predict notes type
        notes_sequence = self.predict_sym(sequence, condition, bpm_info)
        return notes_sequence, probs
class SMPredictorDDC(PredictorDDC):
    """Combine onset and sym model (StepMania flavour)."""

    @property
    def difficulties(self):
        # Generator over all StepMania difficulty ids.
        return (d.value for d in SMDifficultyType)

    @property
    def min_note_distance(self):
        return sm_min_note_distance

    @property
    def max_threshold(self):
        return MAX_THRESHOLD

    @property
    def init_threshold(self) -> Dict:
        return sm_init_threshold

    def _adjust_threshold(
        self,
        sequence: np.ndarray,
        probs: np.ndarray,
        condition: int,
        songlen: int,
        bpm_info: List,
    ) -> np.ndarray:
        """Raise the onset threshold until the note count fits the difficulty cap."""
        assert bpm_info is not None, "bpm_info must be provided"
        max_notes = sm_max_notes[condition]
        sequence, _ = adjust_threshold(
            sequence,
            probs,
            max_notes,
            self.init_threshold[condition],
            self.max_threshold,
        )
        return sequence

    def predict_sym(self, sequence, condition, bpm_info=None):
        """Assign note types to the predicted onsets.

        Returns rows of ``[timing(ms), lane0, lane1, lane2, lane3]``:
        [[ 2144    0    1    0    0]
         [ 4896    0    1    1    0]
         ...
        """
        # notes_sequence, sym_probs = self.sym_predict(
        #     self.sym_model, sequence, condition, self.device
        # )
        # Fix: ``np.int`` was removed in NumPy 1.24; the builtin ``int``
        # is the exact equivalent (platform default integer dtype).
        notes_sequence = np.zeros((sequence.shape[0], 5), dtype=int)
        # NOTE(review): sequence[:-1] leaves the final onset as an all-zero
        # row — confirm this truncation is intentional.
        for i, x in enumerate(sequence[:-1]):
            # In this demo, we focus on notes timing
            # so we place all notes in the first lane
            notes_sequence[i] = np.array([int(x * FRAME), 1, 0, 0, 0])
        logger.info("predict symbol complete")
        return notes_sequence

    def predict_all(self, mel: np.array, bpm_info: Optional[List] = None):
        """Predict and CSV-convert note sequences for every difficulty."""
        scores = dict()
        probs = dict()
        for difficulty in self.difficulties:
            score_, prob_ = self.predict(mel, difficulty, bpm_info=bpm_info)
            scores[difficulty] = _convert_to_csv_type_stepmania(score_)
            probs[difficulty] = prob_
            logger.info(f"predicted notes: {len(scores[difficulty])} difficulty: {difficulty}")
        return scores, probs
def _convert_to_csv_type_stepmania(score: np.ndarray) -> List[Dict]:
    """Flatten a score matrix (time + per-lane note types) into note dicts."""
    converted: List[Dict] = []
    for row in score:
        time = row[0]
        for rail, note_type in enumerate(row[1:]):
            if note_type == 0:
                continue
            is_long_head, is_long_tail = False, False
            if note_type == SMNotesType.hold_roll_head.value:
                is_long_head = True
            elif note_type == SMNotesType.hold_roll_tail.value:
                is_long_tail = True
            converted.append(
                {
                    "tap_time": time,
                    "track_index": rail * 4,
                    "is_long_head": is_long_head,
                    "is_long_tail": is_long_tail,
                }
            )
    return converted
def filter_overwrap(sequence, probs, min_distance):
    """Drop the weaker of any two notes closer than ``min_distance`` frames.

    Parameters
    ----------
    sequence
        Sorted frame indices of candidate notes.
    probs
        Per-frame note probabilities, indexed by frame.
    min_distance
        Minimum allowed gap (in frames) between consecutive notes.

    Returns
    -------
    np.ndarray of the surviving frame indices.
    """
    dropped = set()
    last_frame = 0
    last_prob = None
    for frame in sequence:
        cur_prob = probs[frame]
        if last_prob and (frame - last_frame) <= min_distance:
            # Keep whichever of the pair has the higher probability.
            if cur_prob > last_prob:
                dropped.add(last_frame)
            else:
                dropped.add(frame)
        last_frame, last_prob = frame, cur_prob
    logger.info(f"exclude overwrap {len(dropped)}")
    return np.array([frame for frame in sequence if frame not in dropped])
def filter_by_threshold(sequence, probs, threshold):
    """Keep only onsets whose probability reaches ``threshold`` (sorted ascending)."""
    keep = probs[sequence].flatten() >= threshold
    return np.sort(sequence[np.flatnonzero(keep)])
def adjust_threshold(onset_seq, probs, max_notes, init_threshold, max_threshold):
    """Adjust a threshold so that the notes number is within acceptable limits.

    Starting from ``init_threshold``, the threshold is raised in 0.05 steps
    (never past ``max_threshold``) until at most ``max_notes`` onsets remain.
    Returns the filtered sequence and the chosen threshold.
    """
    chosen = init_threshold
    while len(onset_seq) > max_notes and chosen <= max_threshold:
        chosen += 0.05
        onset_seq = filter_by_threshold(onset_seq, probs, chosen)
    logger.info(f"threshold was chosen to be {chosen}")
    return onset_seq, chosen
def filter_notes(notes_sequence, probs):
    """Filter irregular notes.

    Drops rows with a zero timestamp and rewrites inconsistent long notes on
    both rails (mutating shared state via ``rewrite_long_notes``).

    Parameters
    ----------
    notes_sequence
        Rows of (msec, left_note, right_note).
    probs
        Per-row note probabilities used to arbitrate conflicts.

    Returns
    -------
    List of (msec, left_note, right_note) tuples.
    """
    flags = [None, False, False]
    filtered = []
    for idx, row in enumerate(notes_sequence):
        msec, _, _ = row
        if msec == 0:
            continue
        left = rewrite_long_notes(1, idx, notes_sequence, probs, flags)
        right = rewrite_long_notes(2, idx, notes_sequence, probs, flags)
        filtered.append((msec, left, right))
    return filtered
def rewrite_long_notes(rail, cur_idx, values, probs, flags):
    """Rewrite long notes to be consistent.

    Ensures every "long note start" (6) has a matching "long note end" (7)
    within the next 6 rows and vice versa. Inconsistent long notes are demoted
    to normal notes (1); conflicting in-between notes may be removed, decided
    by comparing probability products.

    Parameters
    ----------
    rail : int
        Lane column to inspect (1 or 2).
    cur_idx : int
        Row index currently being processed.
    values
        Full notes sequence; rows are mutated in place.
    probs
        Per-row note probabilities.
    flags
        Per-rail "inside a long note" state, mutated in place.

    Returns
    -------
    int
        The (possibly rewritten) note type for this row/rail.
    """
    notes = values[cur_idx][rail]
    if notes not in (6, 7):
        return notes
    if notes == 7:
        # In the case of "long notes end":
        # check "long notes start" exists before the note
        if flags[rail]:
            flags[rail] = False
            notes = 7
        else:
            logger.info(f"[{cur_idx}]: invalid long notes 7 {probs[cur_idx]:.2f}")
            notes = 1
        return notes
    # long notes start
    start_idx = cur_idx + 1
    end_idx = cur_idx + 7
    next_values = [
        (i, row[rail]) for i, row in zip(range(start_idx, end_idx), values[start_idx:end_idx])
    ]
    close_idx = None
    excludes = set()
    # BUGFIX: probability product must start at the multiplicative identity
    # (1.0). The previous 0.0 made exclude_p permanently zero, so the
    # "prefer middle sequence" branch below could never win.
    exclude_p = 1.0
    for i, val in next_values:
        if val == 7:
            close_idx = i
            break
        elif val == 0:
            continue
        else:
            # exclude inconsistent note
            excludes.add(i)
            exclude_p *= probs[i]
    if close_idx:
        long_notes_p = probs[cur_idx] * probs[close_idx]
    else:
        long_notes_p = probs[cur_idx]
    if not close_idx:
        # When "long note end" does not exist
        logger.info(f"[{cur_idx}]: invalid long notes 6 {long_notes_p:.2f}")
        values[cur_idx][rail] = 1
        notes = 1
    elif not excludes or long_notes_p > exclude_p:
        if excludes:
            # exclude inconsistent note
            logger.info(
                f"[{cur_idx}]: invalid long notes exclude middle {long_notes_p:.2f} {exclude_p:.2f}"
            )
            for idx in excludes:
                values[idx][rail] = 0
        notes = 6
        assert flags[rail] is False
        flags[rail] = True
    else:
        logger.info(
            f"[{cur_idx}]: invalid long notes prefer middle sequence {long_notes_p:.4f} {exclude_p:.4f}"
        )
        values[cur_idx][rail] = 1
        values[close_idx][rail] = 1
        notes = 1
    return notes
def current_beat_interval(msec, bpm_info):
    """Return the beat interval (ms) of the bpm segment active at ``msec``."""
    assert (
        msec >= bpm_info[0][1]
    ), f"A note exists before the start of bpm info (Note position: {msec}, start of bpm_info: {bpm_info[0][1]})"
    active_bpm = None
    for bpm, start_msec, _ in bpm_info:
        # Segments are ordered by start time; stop at the first future one.
        if msec < start_msec:
            break
        active_bpm = bpm
    return 60000 / active_bpm
def count_measures(songlen, bpm_info):
    """Count measures of the song.

    Parameters
    ----------
    songlen: int
        Song length [ms]
    bpm_info: List[Tuple[Union[int, float], int, int]]
        Sequence of (bpm, start_msec, beats_per_measure) segments.

    Returns
    -------
    measure_count: float
        Count of measures (possibly fractional).
    """
    measure_count = 0
    last_segment = len(bpm_info) - 1
    for i, (bpm, time, beat) in enumerate(bpm_info):
        if time > songlen:
            break
        # Length of this bpm segment, clipped to the end of the song.
        if i == last_segment:
            segment_len = songlen - time
        else:
            segment_len = min(bpm_info[i + 1][1], songlen) - time
        measure_intv = current_beat_interval(time, bpm_info) * beat
        measure_count += segment_len / measure_intv
    return measure_count
| 12,884 | 29.246479 | 114 | py |
AAAI-23.6040 | AAAI-23.6040-master/notes_generator/prediction/onset_prediction.py | from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from notes_generator.constants import ConvStackType
from notes_generator.models.beats import gen_beats_array
def predict(
    model,
    mel,
    condition: int,
    threshold: float = 0.5,
    bpm_info: Optional[List[Tuple[Union[float, int], int, int]]] = None,
    device: str = "cpu",
    use_rand: bool = False,
):
    """Binarize onset probabilities into a 0/1 per-frame prediction.

    Returns (binary prediction array, raw probability array).
    ``use_rand=True`` samples each frame with probability equal to its score
    instead of applying the fixed ``threshold``.
    """
    probs = predict_proba(model, mel, condition, bpm_info, device)
    # Fix: ``np.int`` was removed in NumPy 1.24; the builtin ``int`` gives
    # the identical platform-default integer dtype.
    if use_rand:
        rn = np.random.rand(*probs.shape)
        prediction_ = (probs >= rn).astype(int)
    else:
        prediction_ = (probs >= threshold).astype(int)
    return prediction_, probs
def predict_proba(
    model,
    mel,
    condition: int,
    bpm_info: Optional[List[Tuple[Union[float, int], int, int]]] = None,
    device: str = "cpu",
):
    """Run the onset model on a mel spectrogram and return per-frame scores."""
    model.eval()
    with torch.no_grad():
        features = torch.tensor(mel)
        # shape: (BATCH, TIME, N_MEL)
        features = features.reshape(1, features.shape[0], features.shape[1]).float().to(device)
        # Broadcast the difficulty condition over every time step.
        cond = torch.tensor([condition]).expand(1, features.shape[1], 1).float().to(device)
        beats = None
        if bpm_info is not None:
            beats_np = gen_beats_array(features.shape[1], bpm_info, features.shape[1])
            beats = torch.from_numpy(beats_np).reshape(1, -1, 1).float().to(device)
        prediction = model(features, cond, beats)
        # Strip the batch dimension and hand back a NumPy array.
        return prediction[0].cpu().numpy()
| 1,516 | 28.173077 | 91 | py |
gradual-learning-rnn | gradual-learning-rnn-master/pytorch_impl/main.py | import argparse
import os
import time
import math
import ast
import numpy as np
import torch
import torch.nn as nn
import gc
import data
import model
from utils import batchify, get_batch, repackage_hidden, create_exp_dir, save_checkpoint
parser = argparse.ArgumentParser(description='PyTorch PennTreeBank/WikiText2 RNN/LSTM Language Model')
parser.add_argument('--data', type=str, default='../data/ptb',
help='location of the data corpus')
parser.add_argument('--model', type=str, default='LSTM',
help='type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU, SRU)')
parser.add_argument('--emsize', type=int, default=280,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=960,
help='number of hidden units per layer')
parser.add_argument('--nhidlast', type=int, default=960,
help='number of hidden units for the last rnn layer')
parser.add_argument('--nlayers', type=int, default=3,
help='number of layers')
parser.add_argument('--lr', type=float, default=20,
help='initial learning rate')
parser.add_argument('--clip', type=str, default='[0.25,0.1,0.15,0.15]',
help='maximum gradient norm, given as a list with value for each layer. '
'for lwgc reffer to the structure: [emb, L0, L1, ..., Ln]')
parser.add_argument('--epochs', type=int, default=600,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=12, metavar='N',
help='batch size')
parser.add_argument('--bptt', type=int, default=70,
help='sequence length')
parser.add_argument('--dropout', type=float, default=0.4,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--dropouth', type=str, default='[0.225]',
help='dropout for rnn layers (0 = no dropout), given as a list with value for each layer')
parser.add_argument('--dropouti', type=float, default=0.4,
help='dropout for input embedding layers (0 = no dropout)')
parser.add_argument('--dropoute', type=float, default=0.1,
help='dropout to remove words from embedding layer (0 = no dropout)')
parser.add_argument('--dropoutl', type=float, default=0.29,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--wdrop', type=float, default=0.5,
help='amount of weight dropout to apply to the RNN hidden to hidden matrix')
parser.add_argument('--tied', action='store_false',
help='tie the word embedding and softmax weights')
parser.add_argument('--seed', type=int, default=28,
help='random seed')
parser.add_argument('--nonmono', type=int, default=5,
help='random seed')
parser.add_argument('--cuda', action='store_false',
help='use CUDA')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
help='report interval')
parser.add_argument('--save', type=str, default='./TEST',
help='path to save the final model')
parser.add_argument('--load_prev', type=str, default='./GL/L0/TEST/)',
help='path of pretrained model to load layers from, for GL training')
parser.add_argument('--dir', type=str, default=None,
help='path to directory that contains the saved files')
parser.add_argument('--alpha', type=float, default=2,
help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)')
parser.add_argument('--beta', type=float, default=1,
help='beta slowness regularization applied on RNN activiation (beta = 0 means no regularization)')
parser.add_argument('--wdecay', type=float, default=1.2e-6,
help='weight decay applied to all weights')
parser.add_argument('--continue_train', action='store_true',
help='continue train from a checkpoint')
parser.add_argument('--n_experts', type=int, default=15,
help='number of MoS experts')
parser.add_argument('--small_batch_size', type=int, default=-1,
help='the batch size for computation. batch_size should be divisible by small_batch_size.\
In our implementation, we compute gradients with small_batch_size multiple times, and accumulate the gradients\
until batch_size is reached. An update step is then performed.')
parser.add_argument('--max_seq_len_delta', type=int, default=40,
help='max sequence length')
parser.add_argument('--single_gpu', action='store_true',
help='use single GPU')
parser.add_argument('--gpu', type=str, default=None,
help='set gpu device ID (-1 for cpu)')
parser.add_argument('--GL', default=False,
help='use gradual learning (GL).')
parser.add_argument('--lwgc', default=True, help='use layer-wise grad clipping (LWGC).')
parser.add_argument('--start_layer', type=int, default=0,
help='starting layer up to which initializing from previous phase, in case of a GL training. Normally set to L-1 when training L layers phase.')
parser.add_argument('--new_layer', type=int, default=None,
help='the new/uninitialized layer location in the network for GL process, default/None will add the new layer as the deepest.')
args = parser.parse_args()
# ---- derive runtime configuration from the parsed arguments ----
if args.gpu is not None:
    args.cuda = True
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
else:
    # to run on cpu, model must have been trained on cpu
    args.cuda = False
args.dropouth = ast.literal_eval(args.dropouth)
if (args.nlayers - 1) > len(args.dropouth):
    # broadcast a single dropout value to every layer
    args.dropouth = args.nlayers * args.dropouth
if args.nhidlast < 0:
    args.nhidlast = args.emsize
if args.dropoutl < 0:
    args.dropoutl = args.dropouth[-1]
if args.small_batch_size < 0:
    args.small_batch_size = args.batch_size
if args.dir is not None:
    # Fix: these two makedirs previously ran unconditionally and raised a
    # TypeError when --dir was left at its default of None (consistent with
    # the `args.dir is not None` guard used further below).
    if not os.path.exists(args.dir):
        os.makedirs(args.dir)
    layer_dir = args.dir + '/L' + str(args.nlayers - 1)
    if not os.path.exists(layer_dir):
        os.makedirs(layer_dir)
# adapt args.save directory
if args.GL and not args.continue_train:
    args.save = 'L' + str(args.start_layer) + '/' + args.save
if args.dir is not None:
    args.save = args.dir + '/' + args.save
if not os.path.exists(args.save):
    os.makedirs(args.save)
args.clip = ast.literal_eval(args.clip)
if not args.lwgc:
    # a single global clipping norm instead of per-layer values
    args.clip = args.clip[0]
if not args.continue_train:
    # args.save = '{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
    create_exp_dir(args.save, scripts_to_save=['main.py', 'model.py'])
def logging(s, print_=True, log_=True):
    """Echo ``s`` to stdout and/or append it to the run's log file."""
    if print_:
        print(s)
    if log_:
        log_path = os.path.join(args.save, 'log.txt')
        with open(log_path, 'a+') as f_log:
            f_log.write(s + '\n')
# load previous phase layers for initialization
def load_layers(model, path):
    """Copy parameters from the previous GL-phase model into ``model``,
    skipping the 4 parameter tensors of the newly added recurrent layer."""
    # Collect the target parameters, excluding the new layer's slice.
    params = list(model.parameters())
    new_layer = args.new_layer if args.new_layer is not None else args.nlayers - 1
    # Embedding comes first; each recurrent layer owns 4 parameter tensors.
    first = 1 + 4 * new_layer
    del params[first:first + 4]
    # Load the old model and copy its parameters positionally.
    prev_model = torch.load(path)
    for i, p in enumerate(prev_model.parameters()):
        params[i].data = p.data
    return model
# Set the random seed manually for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    if not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
    else:
        torch.cuda.manual_seed_all(args.seed)
###############################################################################
# Load data
###############################################################################
corpus = data.Corpus(args.data)
eval_batch_size = 10
test_batch_size = 1
train_data = batchify(corpus.train, args.batch_size, args)
val_data = batchify(corpus.valid, eval_batch_size, args)
test_data = batchify(corpus.test, test_batch_size, args)
###############################################################################
# Build the model
###############################################################################
ntokens = len(corpus.dictionary)
if args.continue_train:
    # Resume: restore the fully serialized model.
    model = torch.load(os.path.join(args.save, 'model.pt'))
elif args.GL:
    # Gradual learning: build a fresh model, then copy layers trained in the
    # previous phase. NOTE: this rebinds the name `model` from the imported
    # module to the model instance.
    model = model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nhidlast, args.nlayers,
                           args.dropout, args.dropouth, args.dropouti, args.dropoute, args.wdrop,
                           args.tied, args.dropoutl, args.n_experts)
    if args.start_layer > 0:
        print('loading prev model...')
        model = load_layers(model, os.path.join(args.load_prev, 'finetune_model.pt'))
else:
    model = model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nhidlast, args.nlayers,
                           args.dropout, args.dropouth, args.dropouti, args.dropoute, args.wdrop,
                           args.tied, args.dropoutl, args.n_experts)
if args.cuda:
    if args.single_gpu:
        parallel_model = model.cuda()
    else:
        parallel_model = nn.DataParallel(model, dim=1).cuda()
else:
    parallel_model = model
total_params = sum(x.data.nelement() for x in model.parameters())
logging('Args: {}'.format(args))
logging('Model total parameters: {}'.format(total_params))
criterion = nn.CrossEntropyLoss()  # NOTE(review): appears unused below — confirm
###############################################################################
# Evaluation code
###############################################################################
def evaluate(data_source, batch_size=10):
    """Return average NLL per token over ``data_source`` (uses module globals).

    NOTE(review): written against an old PyTorch API — ``total_loss[0]``
    indexes what is a 0-dim tensor in newer PyTorch (use ``.item()`` there);
    kept byte-identical for consistency with the rest of this script.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)  # NOTE(review): unused here
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, args, evaluation=True)
        targets = targets.view(-1)
        log_prob, hidden = parallel_model(data, hidden)
        # Mean NLL for this chunk, weighted by its number of time steps.
        loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data
        total_loss += loss * len(data)
        # Detach the hidden state so evaluation memory stays bounded.
        hidden = repackage_hidden(hidden)
    return total_loss[0] / len(data_source)
###############################################################################
# Training code
###############################################################################
def train(epoch):
    """Train the model for one epoch.

    Reads module-level globals: args, corpus, train_data, model,
    parallel_model, optimizer. Gradients are accumulated over
    ``batch_size // small_batch_size`` chunks before each optimizer step.
    (Cleanup: removed unused locals ``gradlist``, ``N`` and ``ntokens``.)
    """
    assert args.batch_size % args.small_batch_size == 0, 'batch_size must be divisible by small_batch_size'
    # Turn on training mode which enables dropout.
    total_loss = 0
    start_time = time.time()
    # One hidden-state tuple per gradient-accumulation chunk.
    hidden = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)]
    batch, i = 0, 0
    while i < train_data.size(0) - 1 - 1:
        # Occasionally halve BPTT and jitter the sequence length.
        bptt = args.bptt if np.random.random() < 0.95 else args.bptt / 2.
        # Prevent excessively small or negative sequence lengths
        seq_len = max(5, int(np.random.normal(bptt, 5)))
        # There's a very small chance that it could select a very long sequence length resulting in OOM
        seq_len = min(seq_len, args.bptt + args.max_seq_len_delta)
        # Rescale lr proportionally to the sampled sequence length.
        lr2 = optimizer.param_groups[0]['lr']
        optimizer.param_groups[0]['lr'] = lr2 * seq_len / args.bptt
        model.train()
        data, targets = get_batch(train_data, i, args, seq_len=seq_len)
        optimizer.zero_grad()
        start, end, s_id = 0, args.small_batch_size, 0
        while start < args.batch_size:
            cur_data, cur_targets = data[:, start: end], targets[:, start: end].contiguous().view(-1)
            # Starting each batch, we detach the hidden state from how it was previously produced.
            # If we didn't, the model would try backpropagating all the way to start of the dataset.
            hidden[s_id] = repackage_hidden(hidden[s_id])
            log_prob, hidden[s_id], rnn_hs, dropped_rnn_hs = parallel_model(cur_data, hidden[s_id], return_h=True)
            raw_loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), cur_targets)
            loss = raw_loss
            # Activiation Regularization - AR
            loss = loss + sum(args.alpha * dropped_rnn_h.pow(2).mean() for dropped_rnn_h in dropped_rnn_hs[-1:])
            # Temporal Activation Regularization - TAR(slowness)
            loss = loss + sum(args.beta * (rnn_h[1:] - rnn_h[:-1]).pow(2).mean() for rnn_h in rnn_hs[-1:])
            loss *= args.small_batch_size / args.batch_size
            total_loss += raw_loss.data * args.small_batch_size / args.batch_size
            loss.backward()
            s_id += 1
            start = end
            end = start + args.small_batch_size
            gc.collect()
        # gradient norm clipping or LWGC -
        # helps prevent the exploding gradient problem in RNNs and reduce covariate shift.
        if args.lwgc:
            plist = list(model.parameters())
            # embeddings clip (embedding weight + tied softmax weight):
            emb_params = [plist[0], plist[-1]]
            torch.nn.utils.clip_grad_norm(emb_params, args.clip[0])
            # layers clip (each LSTM layer owns 4 parameter tensors):
            for idx in range(1, 4 * args.nlayers, 4):
                l_params = plist[idx:idx + 4]
                torch.nn.utils.clip_grad_norm(l_params, args.clip[1 + idx // 4])
            # MoS clip:
            mos_params = plist[-4:-1]
            torch.nn.utils.clip_grad_norm(mos_params, args.clip[-1])
        else:
            torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
        optimizer.step()
        # Restore the un-rescaled learning rate.
        optimizer.param_groups[0]['lr'] = lr2
        if batch % args.log_interval == 0 and batch > 0:
            cur_loss = total_loss[0] / args.log_interval
            elapsed = time.time() - start_time
            logging('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
                    'loss {:5.2f} | ppl {:8.2f}'.format(
                epoch, batch, len(train_data) // args.bptt, optimizer.param_groups[0]['lr'],
                elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
            total_loss = 0
            start_time = time.time()
        ###
        batch += 1
        i += seq_len
lr = args.lr
best_val_loss = []
stored_loss = 100000000
val_perp_list = []
# At any point you can hit Ctrl + C to break out of training early.
try:
    if args.continue_train:
        # Restore the optimizer: ASGD if the checkpoint carries a 't0'
        # entry, plain SGD otherwise.
        optimizer_state = torch.load(os.path.join(args.save, 'optimizer.pt'))
        if 't0' in optimizer_state['param_groups'][0]:
            optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
        else:
            optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
        optimizer.load_state_dict(optimizer_state)
    else:
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
    # Loop over epochs.
    for epoch in range(1, args.epochs+1):
        epoch_start_time = time.time()
        train(epoch)
        if 't0' in optimizer.param_groups[0]:
            # ASGD phase: temporarily swap in the averaged weights ('ax')
            # for validation, then restore the raw weights afterwards.
            tmp = {}
            for prm in model.parameters():
                tmp[prm] = prm.data.clone()
                prm.data = optimizer.state[prm]['ax'].clone()
            val_loss2 = evaluate(val_data)
            logging('-' * 89)
            logging('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
                    'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
                                               val_loss2, math.exp(val_loss2)))
            logging('-' * 89)
            val_perp_list.append(math.exp(val_loss2))
            if val_loss2 < stored_loss:
                save_checkpoint(model, optimizer, args.save)
                logging('Saving Averaged!')
                stored_loss = val_loss2
            for prm in model.parameters():
                prm.data = tmp[prm].clone()
        else:
            # SGD phase: evaluate with the current weights.
            val_loss = evaluate(val_data, eval_batch_size)
            logging('-' * 89)
            logging('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
                    'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
                                               val_loss, math.exp(val_loss)))
            logging('-' * 89)
            val_perp_list.append(math.exp(val_loss))
            if val_loss < stored_loss:
                save_checkpoint(model, optimizer, args.save)
                logging('Saving Normal!')
                stored_loss = val_loss
            # Switch from SGD to ASGD once validation stops improving for
            # `nonmono` epochs (the AWD-LSTM non-monotonic trigger).
            if 't0' not in optimizer.param_groups[0] and (len(best_val_loss)>args.nonmono and val_loss > min(best_val_loss[:-args.nonmono])):
                logging('Switching!')
                optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
                #optimizer.param_groups[0]['lr'] /= 2.
            best_val_loss.append(val_loss)
except KeyboardInterrupt:
    logging('-' * 89)
    logging('Exiting from training early')
# Load the best saved model.
model = torch.load(os.path.join(args.save, 'model.pt'))
# NOTE(review): wraps/moves to GPU unconditionally, ignoring args.cuda —
# this will fail on a CPU-only run; confirm intended.
parallel_model = nn.DataParallel(model, dim=1).cuda()
# Run on test data.
test_loss = evaluate(test_data, test_batch_size)
logging('=' * 89)
logging('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
    test_loss, math.exp(test_loss)))
logging('=' * 89)
gradual-learning-rnn | gradual-learning-rnn-master/pytorch_impl/evaluate.py | import argparse
import os
import time
import math
import csv
import ast
import pickle
import numpy as np
import torch
import torch.nn as nn
import data
import model
from utils import batchify, get_batch, repackage_hidden
# Command-line interface (mirrors main.py; --clip and --dropouth are list
# literals parsed with ast.literal_eval below).
parser = argparse.ArgumentParser(description='PyTorch PennTreeBank/WikiText2 RNN/LSTM Language Model')
parser.add_argument('--data', type=str, default='./data/penn/',
                    help='location of the data corpus')
parser.add_argument('--model', type=str, default='LSTM',
                    help='type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU, SRU)')
parser.add_argument('--emsize', type=int, default=280,
                    help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=960,
                    help='number of hidden units per layer')
parser.add_argument('--nhidlast', type=int, default=960,
                    help='number of hidden units for the last rnn layer')
parser.add_argument('--nlayers', type=int, default=3,
                    help='number of layers')
parser.add_argument('--lr', type=float, default=20,
                    help='initial learning rate')
parser.add_argument('--clip', type=str, default='[0.25,0.1,0.15,0.15]',
                    help='gradient clipping, for lwgc reffer to the structure [emb, L0, L1, ..., Ln]')
parser.add_argument('--epochs', type=int, default=1000,
                    help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=12, metavar='N',
                    help='batch size')
parser.add_argument('--bptt', type=int, default=70,
                    help='sequence length')
parser.add_argument('--dropout', type=float, default=0.4,
                    help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--dropouth', type=str, default='[0.225]',
                    help='dropout for rnn layers (0 = no dropout), given as list for each layer')
parser.add_argument('--dropouti', type=float, default=0.4,
                    help='dropout for input embedding layers (0 = no dropout)')
parser.add_argument('--dropoute', type=float, default=0.1,
                    help='dropout to remove words from embedding layer (0 = no dropout)')
parser.add_argument('--dropoutl', type=float, default=0.29,
                    help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--wdrop', type=float, default=0.5,
                    help='amount of weight dropout to apply to the RNN hidden to hidden matrix')
parser.add_argument('--tied', action='store_false',
                    help='tie the word embedding and softmax weights')
parser.add_argument('--seed', type=int, default=28,
                    help='random seed')
parser.add_argument('--nonmono', type=int, default=5,
                    help='random seed')
parser.add_argument('--cuda', action='store_false',
                    help='use CUDA')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
                    help='report interval')
parser.add_argument('--save', type=str, default='SWEEP/EXP',
                    help='path to save the final model')
parser.add_argument('--load_prev', type=str, default='DYN-20180227-120514',
                    help='path to pretrained layers on GL processes')
parser.add_argument('--dir', type=str, default=None,
                    help='path to directory that contains the saved files')
parser.add_argument('--alpha', type=float, default=2,
                    help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)')
parser.add_argument('--beta', type=float, default=1,
                    help='beta slowness regularization applied on RNN activiation (beta = 0 means no regularization)')
parser.add_argument('--wdecay', type=float, default=1.2e-6,
                    help='weight decay applied to all weights')
parser.add_argument('--continue_train', action='store_true',
                    help='continue train from a checkpoint')
parser.add_argument('--n_experts', type=int, default=15,
                    help='number of experts')
parser.add_argument('--small_batch_size', type=int, default=-1,
                    help='the batch size for computation. batch_size should be divisible by small_batch_size.\
 In our implementation, we compute gradients with small_batch_size multiple times, and accumulate the gradients\
 until batch_size is reached. An update step is then performed.')
parser.add_argument('--max_seq_len_delta', type=int, default=40,
                    help='max sequence length')
parser.add_argument('--single_gpu', default=True, action='store_true',
                    help='use single GPU')
parser.add_argument('--gpu', type=int, default=0,
                    help='set gpu device ID (-1 for cpu)')
parser.add_argument('--GL', default=False,
                    help='use gradual learning')
parser.add_argument('--lwgc', default=False, help='use layer-wise grad clipping')
parser.add_argument('--start_layer', type=int, default=0,
                    help='which layer to train in case of gradual learning training')
parser.add_argument('--new_layer', type=int, default=None,
                    help='the new layer location in a GL process')
parser.add_argument('--record_stats', default=False, help='record gradients statistics')
args = parser.parse_args()
# ---- derive runtime configuration from the parsed arguments ----
if args.gpu>=0:
    args.cuda = True
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
else:
    #to run on cpu, model must have been trained on cpu
    args.cuda=False
args.dropouth = ast.literal_eval(args.dropouth)
if (args.nlayers - 1) > len(args.dropouth):
    # broadcast a single dropout value to every layer
    args.dropouth = args.nlayers * args.dropouth
if args.nhidlast < 0:
    args.nhidlast = args.emsize
if args.dropoutl < 0:
    args.dropoutl = args.dropouth[-1]
if args.small_batch_size < 0:
    args.small_batch_size = args.batch_size
args.clip = ast.literal_eval(args.clip)
if not args.lwgc:
    # a single global clipping norm instead of per-layer values
    args.clip = args.clip[0]
def logging(s, print_=True, log_=True):
    """Echo message `s` to stdout and/or append it to <args.save>/log.txt."""
    if print_:
        print(s)
    if log_:
        log_path = os.path.join(args.save, 'log.txt')
        with open(log_path, 'a+') as handle:
            handle.write(s + '\n')
def log_results(s, name='.npy', print_=False, log_=True):
    """Pickle the result object `s` into <save-parent>/graph/<save-name><name>.

    Args:
        s: any picklable object (e.g. a list of validation perplexities).
        name: filename suffix appended to the run name.
        print_: also echo `s` to stdout.
        log_: actually write the file (allows dry runs).
    """
    graph_dir = '/'.join(args.save.split('/')[:-1]) + '/graph/'
    if not os.path.exists(graph_dir):
        os.makedirs(graph_dir)
    # NOTE: the original code computed the exact same path in both the
    # fresh-run and --continue_train branches, so the dead branch is collapsed.
    fname = graph_dir + args.save.split('/')[-1] + name
    if print_:
        print(s)
    if log_:
        # `with` closes the file; the explicit close() was redundant.
        with open(fname, 'wb') as f_log:
            pickle.dump(s, f_log)
def log_csv(perp):
    """Append one CSV row of all run arguments (sorted case-insensitively)
    plus the final perplexity; write a matching header on first use.

    Args:
        perp: the perplexity value recorded in the last column.
    """
    if args.dir is not None:
        fname = args.dir + '/' + args.dir + '-results.csv'
    else:
        fname = 'results.csv'
    # Work on a COPY: vars(args) aliases args.__dict__, and the original code
    # both deleted "save" from it and rewrote list-valued entries as strings,
    # permanently mutating args for the rest of the program.
    dicta = dict(vars(args))
    save_val = dicta.pop("save")
    keyslist = sorted(dicta, key=str.lower)
    if not os.path.isfile(fname):
        # BUG FIX: the old header listed "save" twice ("save" column plus the
        # sorted key), while data rows removed it once -- columns misaligned.
        with open(fname, 'a', newline='') as fd:
            fdwriter = csv.writer(fd, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
            fdwriter.writerow(["save"] + keyslist + ["perp"])
    with open(fname, 'a', newline='') as fd:
        fdwriter = csv.writer(fd, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        line = [save_val]
        for k in keyslist:
            v = dicta[k]
            if isinstance(v, list):
                # flatten list-valued args to a dash-separated scalar cell
                v = '-'.join(str(v).split(',')).replace(" ", "").replace("[", "").replace("]", "")
            line.append(str(v))
        line.append(perp)
        fdwriter.writerow(line)
# Running accumulators for gradient statistics, one slot per model parameter:
# avglist: running-mean gradient tensor; normrec: running mean of grad norms;
# avgrec/varrec: per-step gradient mean and variance histories.
avglist,normrec,avgrec,varrec = [],[],[],[]
def record_stats(glist, epoch, N):
    """Fold one optimization step's gradients into the global statistics.

    glist: list of gradient tensors, one per model parameter (same order
    every call). epoch: 1-based epoch index; N: 1-based step within epoch.
    Assumes 663 steps per epoch when flattening (epoch, N) into a global
    step count -- TODO confirm this matches the training data loader.
    """
    N = (epoch-1)*663 + N
    Nm = N - 1
    for i,g in enumerate(glist):
        rawg = g.data.cpu().numpy()
        if N == 1:
            # first global step: initialize one accumulator slot per parameter
            if len(avglist) == i:
                avglist.append(rawg)
            else:
                avglist[i] = avglist[i] * Nm / N + rawg / N
            normrec.append([np.linalg.norm(rawg)])
            avgrec.append([np.mean(rawg)])
            varrec.append([np.var(rawg)])
        else:
            # incremental running mean: avg <- avg*(N-1)/N + new/N
            avglist[i] = avglist[i]*Nm/N + rawg/N
            normrec[i].append(normrec[i][-1]*Nm/N + np.linalg.norm(rawg)/N)
            avgrec[i].append(np.mean(rawg))
            varrec[i].append(np.var(rawg))
def save_stats(path):
    """Pickle the accumulated gradient statistics (running-mean gradients,
    norm history, per-step means and variances) as four files under `path`."""
    if not os.path.exists(path):
        os.makedirs(path)
    dumps = (
        ('total-grads-mean.npy', avglist),
        ('grads-norms.npy', normrec),
        ('grads-mean.npy', avgrec),
        ('grads-var.npy', varrec),
    )
    for fname, payload in dumps:
        with open(path + fname, 'wb') as handle:
            pickle.dump(payload, handle)
def load_layers(model,path):
    """Restore all of `model`'s parameters from the checkpoint at `path`,
    skipping the four parameters of the newly inserted LSTM layer so the new
    layer keeps its fresh initialization (gradual-learning warm start).

    Assumes every LSTM layer contributes exactly 4 consecutive parameters
    starting at offset 1 (after the embedding) -- TODO confirm against the
    model's actual parameter ordering.
    """
    # get a list of the restored parameters - exclude the new layer
    new_plist = []
    for p in model.parameters():
        new_plist.append(p)
    # default: the new layer is the topmost one
    new_layer = (args.nlayers - 1) if args.new_layer is None else args.new_layer
    idx = 1 + 4 * new_layer
    # drop the new layer's 4 slots so they are NOT overwritten below
    new_plist[idx:idx + 4] = []
    # load old model and restore parameters:
    temp_model = torch.load(path)
    for i, p in enumerate(temp_model.parameters()):
        new_plist[i].data = p.data
    return model
# Set the random seed manually for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# if torch.cuda.is_available():
#     if not args.cuda:
#         print("WARNING: You have a CUDA device, so you should probably run with --cuda")
#     else:
#         torch.cuda.manual_seed_all(args.seed)
###############################################################################
# Load data
###############################################################################
corpus = data.Corpus(args.data)
# validation uses batch 10; the final test pass uses batch size 1
eval_batch_size = 10
test_batch_size = 1
train_data = batchify(corpus.train, args.batch_size, args)
val_data = batchify(corpus.valid, eval_batch_size, args)
test_data = batchify(corpus.test, test_batch_size, args)
###############################################################################
# Build the model
###############################################################################
ntokens = len(corpus.dictionary)
# restore a previously trained checkpoint instead of building a fresh model
model = torch.load(os.path.join(args.save, 'model.pt'))
if args.cuda:
    if args.single_gpu:
        parallel_model = model.cuda()
    else:
        # scatter along dim=1, the batch dimension of (seq_len, batch) inputs
        parallel_model = nn.DataParallel(model, dim=1).cuda()
else:
    parallel_model = model
total_params = sum(x.data.nelement() for x in model.parameters())
logging('Args: {}'.format(args))
logging('Model total parameters: {}'.format(total_params))
criterion = nn.CrossEntropyLoss()
###############################################################################
# Training code
###############################################################################
def evaluate(data_source, batch_size=10):
    """Return the average per-token negative log-likelihood over `data_source`.

    data_source: batchified (tokens, batch) LongTensor; batch_size must match
    how it was batchified. Uses the module-level `model`, `parallel_model`,
    `corpus` and `args`.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, args, evaluation=True)
        targets = targets.view(-1)
        log_prob, hidden = parallel_model(data, hidden)
        loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data
        # weight each segment's loss by its length
        total_loss += loss * len(data)
        # detach the hidden state so the autograd graph cannot grow unboundedly
        hidden = repackage_hidden(hidden)
    # total_loss is a 1-element tensor under this legacy (pre-0.4) PyTorch API
    return total_loss[0] / len(data_source)
# def train(epoch):
# assert args.batch_size % args.small_batch_size == 0, 'batch_size must be divisible by small_batch_size'
#
# gradlist = None
# # Turn on training mode which enables dropout.
# total_loss = 0
# start_time = time.time()
# ntokens = len(corpus.dictionary)
# hidden = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)]
# batch, i, N = 0, 0, 1
# while i < train_data.size(0) - 1 - 1:
# bptt = args.bptt if np.random.random() < 0.95 else args.bptt / 2.
# # Prevent excessively small or negative sequence lengths
# seq_len = max(5, int(np.random.normal(bptt, 5)))
# # There's a very small chance that it could select a very long sequence length resulting in OOM
# seq_len = min(seq_len, args.bptt + args.max_seq_len_delta)
#
# lr2 = optimizer.param_groups[0]['lr']
# optimizer.param_groups[0]['lr'] = lr2 * seq_len / args.bptt
# model.train()
# data, targets = get_batch(train_data, i, args, seq_len=seq_len)
#
# optimizer.zero_grad()
#
# start, end, s_id = 0, args.small_batch_size, 0
# while start < args.batch_size:
# cur_data, cur_targets = data[:, start: end], targets[:, start: end].contiguous().view(-1)
#
# # Starting each batch, we detach the hidden state from how it was previously produced.
# # If we didn't, the model would try backpropagating all the way to start of the dataset.
# hidden[s_id] = repackage_hidden(hidden[s_id])
#
# log_prob, hidden[s_id], rnn_hs, dropped_rnn_hs = parallel_model(cur_data, hidden[s_id], return_h=True)
# raw_loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), cur_targets)
#
# loss = raw_loss
# # Activiation Regularization
# loss = loss + sum(args.alpha * dropped_rnn_h.pow(2).mean() for dropped_rnn_h in dropped_rnn_hs[-1:])
# # Temporal Activation Regularization (slowness)
# loss = loss + sum(args.beta * (rnn_h[1:] - rnn_h[:-1]).pow(2).mean() for rnn_h in rnn_hs[-1:])
# loss *= args.small_batch_size / args.batch_size
# total_loss += raw_loss.data * args.small_batch_size / args.batch_size
# loss.backward()
# if args.record_stats:
# if gradlist is None:
# gradlist = []
# for p in model.parameters():
# gradlist.append(p.grad)
# record_stats(gradlist, epoch, N)
# N += 1
# s_id += 1
# start = end
# end = start + args.small_batch_size
#
# gc.collect()
#
# # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
# if args.lwgc:
# plist = []
# for param in model.parameters():
# plist.append(param)
# # embeddings clip:
# emb_params = [plist[0], plist[-1]]
# torch.nn.utils.clip_grad_norm(emb_params, args.clip[0])
# # layers clip:
# for idx in range(1,4*args.nlayers,4):
# l_params = plist[idx:idx+4]
# torch.nn.utils.clip_grad_norm(l_params, args.clip[1+idx//4])
# # MoS clip:
# mos_params = plist[-4:-1]
# torch.nn.utils.clip_grad_norm(mos_params, args.clip[-1])
# else:
# torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
# optimizer.step()
#
# # total_loss += raw_loss.data
# optimizer.param_groups[0]['lr'] = lr2
# if batch % args.log_interval == 0 and batch > 0:
# cur_loss = total_loss[0] / args.log_interval
# elapsed = time.time() - start_time
# logging('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
# 'loss {:5.2f} | ppl {:8.2f}'.format(
# epoch, batch, len(train_data) // args.bptt, optimizer.param_groups[0]['lr'],
# elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
# total_loss = 0
# start_time = time.time()
# ###
# batch += 1
# i += seq_len
# Loop over epochs.
lr = args.lr
best_val_loss = []
stored_loss = 100000000
val_perp_list = []
# At any point you can hit Ctrl + C to break out of training early.
try:
    # if args.continue_train:
    #     optimizer_state = torch.load(os.path.join(args.save, 'optimizer.pt'))
    #     if 't0' in optimizer_state['param_groups'][0]:
    #         optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
    #     else:
    #         optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
    #     optimizer.load_state_dict(optimizer_state)
    #     fname = '/'.join(args.save.split('/')[:-1]) + '/graph/' + args.save.split('/')[-1] + '.npy'
    #     with open(fname, 'rb') as fv:
    #         val_perp_list = pickle.load(fv)
    #     fv.close()
    # else:
    #     optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
    # Evaluation-only run: score the restored checkpoint on validation, then test.
    epoch_start_time = time.time()
    val_loss2 = evaluate(val_data)
    logging('-' * 89)
    logging('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
            'valid ppl {:8.2f}'.format(0, (time.time() - epoch_start_time),
                                       val_loss2, math.exp(val_loss2)))
    test_loss = evaluate(test_data, test_batch_size)
    logging('=' * 89)
    logging('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
        test_loss, math.exp(test_loss)))
    logging('=' * 89)
    logging('recording to CSV file')
    logging('=' * 89)
except KeyboardInterrupt:
    logging('-' * 89)
    logging('Exiting from training early')
# log_csv(math.exp(test_loss))
| 17,935 | 40.614849 | 132 | py |
gradual-learning-rnn | gradual-learning-rnn-master/pytorch_impl/dynamiceval.py | import argparse
import time
import math
import numpy as np
import os
import csv
import torch
import pickle
import torch.nn as nn
from torch.autograd import Variable
import data
# ---- dynamic-evaluation command-line arguments ----
parser = argparse.ArgumentParser(description='PyTorch PennTreeBank RNN/LSTM Language Model')
parser.add_argument('--data', type=str, default='data/penn/',
                    help='location of the data corpus')
parser.add_argument('--dir', type=str, default='./GL/L2/Best/',
                    help='path to the final model directory')
parser.add_argument('--model', type=str, default='finetune_model.pt',
                    help='name of model to eval')
parser.add_argument('--gpu', type=int, default=0,
                    help='set gpu device ID (-1 for cpu)')
parser.add_argument('--val', action='store_true',
                    help='set for validation error, test by default')
parser.add_argument('--lamb', type=float, default=0.075,
                    help='decay parameter lambda')
parser.add_argument('--epsilon', type=float, default=0.001,
                    help='stabilization parameter epsilon')
parser.add_argument('--epsilonu', type=float, default=0.001,
                    help='stabilization parameter epsilon for adadelta dividend')
parser.add_argument('--lr', type=float, default=0.002,
                    help='learning rate eta')
parser.add_argument('--ms', action='store_true', default=False,
                    help='uses mean squared gradients instead of sum squared')
parser.add_argument('--batch_size', type=int, default=70,
                    help='batch size for gradient statistics')
parser.add_argument('--bptt', type=int, default=5,
                    help='sequence/truncation length')
parser.add_argument('--max_batches', type=int, default=-1,
                    help='maximum number of training batches for gradient statistics')
parser.add_argument('--msg_calc', type=bool, default=True,
                    help='calculate msg before dynamic evaluation')
parser.add_argument('--msg_path', type=str, default='./msg/',
                    help='path to load msg from')
parser.add_argument('--mode', type=str, default='msg',
                    help='dynamic evaluation mode, options are: sgd, msg, adadelta')
parser.add_argument('--gamma', type=float, default=0.95,
                    help='moving average decay parameter')
# parser.add_argument('--n_experts', type=int, default=10, help='number of experts')
args = parser.parse_args()
# --gpu >= 0 pins a CUDA device via CUDA_VISIBLE_DEVICES; -1 runs on CPU
if args.gpu>=0:
    args.cuda = True
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
else:
    #to run on cpu, model must have been trained on cpu
    args.cuda=False
def log_results(s, print_=True, log_=True):
    """Echo `s` and/or append it to <args.dir>/dynamic_eval_results.txt."""
    if print_:
        print(s)
    if log_:
        out_path = os.path.join(args.dir, 'dynamic_eval_results.txt')
        with open(out_path, 'a+') as handle:
            handle.write(s + '\n')
def log_csv(perp):
    """Append one CSV row of all argument values (sorted case-insensitively)
    followed by the final perplexity to <args.dir>results.csv.

    Args:
        perp: perplexity value written in the last column.
    """
    fname = args.dir + 'results.csv'
    # Work on a COPY: vars(args) aliases args.__dict__ and the original code
    # rewrote list-valued entries in place, permanently stringifying args.
    dicta = dict(vars(args))
    with open(fname, 'a', newline='') as fd:
        fdwriter = csv.writer(fd, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        line = []
        for k in sorted(dicta, key=str.lower):
            v = dicta[k]
            if isinstance(v, list):
                # flatten list-valued args to a dash-separated scalar cell
                v = '-'.join(str(v).split(',')).replace(" ", "").replace("[", "").replace("]", "")
            line.append(str(v))
        line.append(perp)
        # `with` closes the file; the explicit close() was redundant
        fdwriter.writerow(line)
# Resolve the checkpoint path and load the corpus once up front.
model_name=os.path.join(args.dir + args.model)
start_time = time.time()
print('loading')
corpus = data.Corpus(args.data)
# dynamic evaluation always processes the eval stream with batch size 1
eval_batch_size = 1
test_batch_size = 1
# shorthands for the dynamic-evaluation hyper-parameters
lr = args.lr
lamb = args.lamb
epsilon = args.epsilon
epsilonu = args.epsilonu
def batchify(data, bsz):
    """Reshape the flat token tensor into `bsz` equal-length columns,
    dropping the trailing remainder that would not fill a full column.
    Moves the result to the GPU when args.cuda is set."""
    usable = (data.size(0) // bsz) * bsz
    cols = data.narrow(0, 0, usable).view(bsz, -1).t().contiguous()
    if args.cuda:
        cols = cols.cuda()
    return cols
#######################################################################
def repackage_hidden(h):
    """Wraps hidden states in new Variables, to detach them from their history."""
    # NOTE(review): relies on the pre-0.4 PyTorch Variable API; on modern
    # PyTorch plain tensors never satisfy `type(h) == Variable`, so this
    # would recurse into the tuple branch -- confirm the target torch version.
    if type(h) == Variable:
        return Variable(h.data)
    else:
        return tuple(repackage_hidden(v) for v in h)
def get_batch(source, i, evaluation=False):
    """Slice an (args.bptt, batch) input chunk starting at row `i` plus its
    next-token targets flattened to 1-D (shifted by one position)."""
    seq_len = min(args.bptt, len(source) - 1 - i)
    # volatile=True disables autograd history (legacy pre-0.4 API)
    data = Variable(source[i:i+seq_len], volatile=evaluation)
    target = Variable(source[i+1:i+1+seq_len].view(-1))
    return data, target
def get_msg_file():
    """Build the path of the cached MSG-statistics pickle for the current
    --bptt / --ms settings under args.msg_path."""
    suffix = '.npy' if args.ms else '_batch.npy'
    fname = 'MSG_bptt' + str(args.bptt) + suffix
    return os.path.join(args.msg_path, fname)
def gradstat():
    """One pass over train_data accumulating squared-gradient statistics.

    Side effects on every model parameter p:
      p.MS      -- RMS gradient: sqrt of the (optionally batch-averaged,
                   when --ms) summed squared gradients.
      p.decrate -- p.MS normalized by the sum of mean RMS values; used as a
                   per-parameter decay rate during dynamic evaluation.
    Uses the module-level `model`, `corpus`, `train_data` and `args`.
    """
    total_loss = 0
    start_time = time.time()
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(args.batch_size)
    batch, i = 0, 0
    # zero-initialize the per-parameter accumulator with the right shape
    for param in model.parameters():
        param.MS = 0*param.data
    while i < train_data.size(0) - 1 - 1:
        seq_len = args.bptt
        model.eval()
        data, targets = get_batch(train_data, i)
        hidden = repackage_hidden(hidden)
        model.zero_grad()
        #assumes model has atleast 2 returns, and first is output and second is hidden
        log_prob, hidden = model(data, hidden)
        loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets)
        loss.backward()
        # accumulate the squared gradient of every parameter
        for param in model.parameters():
            param.MS = param.MS + param.grad.data*param.grad.data
        total_loss += loss.data
        batch += 1
        i += seq_len
        # optional cap on the number of batches used for statistics
        if args.max_batches>0:
            if batch>= args.max_batches:
                break
    gsum = 0
    msg_list = []
    for param in model.parameters():
        # --ms averages over batches before the square root
        if args.ms:
            param.MS = torch.sqrt(param.MS/batch)
        else:
            param.MS = torch.sqrt(param.MS)
        msg_list.append(param.MS.cpu().numpy())
        gsum+=torch.mean(param.MS)
    decrate_list = []
    for param in model.parameters():
        param.decrate = param.MS/gsum
        decrate_list.append(param.decrate.cpu().numpy())
    print(40*'-')
    # msg_file_name = get_msg_file()
    # print('saving MSG statistics to: ' + msg_file_name)
    # fp = open(msg_file_name, 'wb')
    # pickle.dump([msg_list, decrate_list], fp)
    # fp.close()
def gradstatload():
    """Restore per-parameter MSG statistics from the pickle written by
    gradstat(), attaching `MS` and `decrate` tensors to every model
    parameter (on GPU when args.cuda is set)."""
    msg_file_name = get_msg_file()
    # `with` guarantees the handle is closed even if unpickling raises
    # (the original left the file open on error); also avoid shadowing
    # the builtin name `list`.
    with open(msg_file_name, 'rb') as fp:
        msg_list, decrate_list = pickle.load(fp)
    # pick the tensor type once instead of branching per parameter
    target_type = torch.cuda.FloatTensor if args.cuda else torch.FloatTensor
    for idx, param in enumerate(model.parameters()):
        param.MS = torch.from_numpy(msg_list[idx]).type(target_type)
        param.decrate = torch.from_numpy(decrate_list[idx]).type(target_type)
def evaluate_msg():
    """Dynamic evaluation with the MSG update rule; returns perplexity (numpy).

    After each bptt segment the weights are adapted online:
        dW = lamb*decrate*(W0 - W) - lr*grad/(MS + epsilon)
    i.e. an RMS-scaled gradient step plus a decay back towards the starting
    weights W0. Requires p.MS / p.decrate populated by gradstat() or
    gradstatload(); uses the module-level model, eval_data, args, lamb, lr,
    epsilon.
    """
    #clips decay rates at 1/lamb
    #otherwise scaled decay rates can be greater than 1
    #would cause decay updates to overshoot
    for param in model.parameters():
        if args.cuda:
            decratenp = param.decrate.cpu().numpy()
            ind = np.nonzero(decratenp>(1/lamb))
            decratenp[ind] = (1/lamb)
            param.decrate = torch.from_numpy(decratenp).type(torch.cuda.FloatTensor)
            # snapshot of the starting weights: the decay target W0
            param.data0 = 1*param.data
        else:
            decratenp = param.decrate.numpy()
            ind = np.nonzero(decratenp>(1/lamb))
            decratenp[ind] = (1/lamb)
            param.decrate = torch.from_numpy(decratenp).type(torch.FloatTensor)
            param.data0 = 1*param.data
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(args.batch_size)
    batch, i = 0, 0
    last = False
    seq_len= args.bptt
    seq_len0 = seq_len
    #loops through data
    while i < eval_data.size(0) - 1 - 1:
        model.eval()
        #gets last chunk of seqlence if seqlen doesn't divide full sequence cleanly
        if (i+seq_len)>=eval_data.size(0):
            if last:
                break
            seq_len = eval_data.size(0)-i-1
            last = True
        data, targets = get_batch(eval_data,i)
        hidden = repackage_hidden(hidden)
        model.zero_grad()
        #assumes model has atleast 2 returns, and first is output and second is hidden
        log_prob, hidden = model(data, hidden)
        loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets)
        #compute gradient on sequence segment loss
        loss.backward()
        #update rule
        for param in model.parameters():
            dW = lamb*param.decrate*(param.data0-param.data)-lr*param.grad.data/(param.MS+epsilon)
            param.data+=dW
        #seq_len/seq_len0 will be 1 except for last sequence
        #for last sequence, we downweight if sequence is shorter
        total_loss += (seq_len/seq_len0)*loss.data
        batch += (seq_len/seq_len0)
        i += seq_len
    #since entropy of first token was never measured
    #can conservatively measure with uniform distribution
    #makes very little difference, usually < 0.01 perplexity point
    #total_loss += (1/seq_len0)*torch.log(torch.from_numpy(np.array([ntokens])).type(torch.cuda.FloatTensor))
    #batch+=(1/seq_len0)
    perp = torch.exp(total_loss/batch)
    if args.cuda:
        return perp.cpu().numpy()
    else:
        return perp.numpy()
def evaluate_adadelta():
    """Dynamic evaluation with an Adadelta-style update; returns perplexity.

    Like evaluate_msg(), but MS becomes an exponential moving average of the
    squared gradients (decay args.gamma) and the step is additionally scaled
    by the RMS of previous updates (MSdelta), Adadelta-style:
        dW = lamb*decrate*(W0 - W) - lr*grad*(RMS(dW)+epsilonu)/(RMS(g)+epsilon)
    Requires p.MS / p.decrate from gradstat() or gradstatload().
    """
    # clips decay rates at 1/lamb
    # otherwise scaled decay rates can be greater than 1
    # would cause decay updates to overshoot
    for param in model.parameters():
        if args.cuda:
            decratenp = param.decrate.cpu().numpy()
            ind = np.nonzero(decratenp > (1 / lamb))
            decratenp[ind] = (1 / lamb)
            param.decrate = torch.from_numpy(decratenp).type(torch.cuda.FloatTensor)
            # snapshot of the starting weights: the decay target W0
            param.data0 = 1 * param.data
        else:
            decratenp = param.decrate.numpy()
            ind = np.nonzero(decratenp > (1 / lamb))
            decratenp[ind] = (1 / lamb)
            param.decrate = torch.from_numpy(decratenp).type(torch.FloatTensor)
            param.data0 = 1 * param.data
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(args.batch_size)
    batch, i = 0, 0
    last = False
    seq_len = args.bptt
    seq_len0 = seq_len
    # create
    # accumulator for the squared parameter updates (Adadelta numerator)
    for param in model.parameters():
        param.MSdelta = torch.ones_like(param.MS)
    # loops through data
    while i < eval_data.size(0) - 1 - 1:
        model.eval()
        # gets last chunk of seqlence if seqlen doesn't divide full sequence cleanly
        if (i + seq_len) >= eval_data.size(0):
            if last:
                break
            seq_len = eval_data.size(0) - i - 1
            last = True
        data, targets = get_batch(eval_data, i)
        hidden = repackage_hidden(hidden)
        model.zero_grad()
        # assumes model has atleast 2 returns, and first is output and second is hidden
        log_prob, hidden = model(data, hidden)
        loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets)
        # compute gradient on sequence segment loss
        loss.backward()
        # update rule
        for param in model.parameters():
            # recalculate MSg for each parameter
            # calculate current dW
            # update MSdelta for next steps
            # update parameters
            param.MS = args.gamma * param.MS + (1 - args.gamma) * (param.grad.data * param.grad.data)
            MSg = torch.sqrt(param.MS / batch) if args.ms else torch.sqrt(param.MS)
            MSdelta = torch.sqrt(param.MSdelta / batch) if args.ms else torch.sqrt(param.MSdelta)
            dW = lamb * param.decrate * (param.data0 - param.data) - lr * param.grad.data * (MSdelta + epsilonu) / (MSg + epsilon)
            param.MSdelta = args.gamma * param.MSdelta + (1 - args.gamma) * (dW * dW)
            param.data += dW
        # seq_len/seq_len0 will be 1 except for last sequence
        # for last sequence, we downweight if sequence is shorter
        total_loss += (seq_len / seq_len0) * loss.data
        batch += (seq_len / seq_len0)
        i += seq_len
    # since entropy of first token was never measured
    # can conservatively measure with uniform distribution
    # makes very little difference, usually < 0.01 perplexity point
    # total_loss += (1/seq_len0)*torch.log(torch.from_numpy(np.array([ntokens])).type(torch.cuda.FloatTensor))
    # batch+=(1/seq_len0)
    perp = torch.exp(total_loss / batch)
    if args.cuda:
        return perp.cpu().numpy()
    else:
        return perp.numpy()
#load model
with open(model_name, 'rb') as f:
    model = torch.load(f)
    if not isinstance(model.dropouth, list):
        # older checkpoints stored a scalar dropouth; expand to one per layer
        model.dropouth = model.nlayers * [model.dropouth]
ntokens = len(corpus.dictionary)
criterion = nn.CrossEntropyLoss()
val_data = batchify(corpus.valid, eval_batch_size)
test_data = batchify(corpus.test, test_batch_size)
# pick the split to dynamically evaluate on
if args.val== True:
    eval_data= val_data
else:
    eval_data=test_data
train_data = batchify(corpus.train, args.batch_size)
print('collecting gradient statistics')
#collect gradient statistics on training data
if args.msg_calc:
    gradstat()
else:
    gradstatload()
#change batch size to 1 for dynamic eval
args.batch_size=1
print(40*'-')
#apply dynamic evaluation
evaluate = {'msg': evaluate_msg, 'adadelta': evaluate_adadelta}
# BUG FIX: the fallback must be the default *function*, not the string 'msg';
# previously an unrecognized --mode made `evaluate_op()` crash with
# "'str' object is not callable". (--mode 'sgd' from the help text has no
# implementation and also falls back to msg.)
evaluate_op = evaluate.get(args.mode, evaluate_msg)
print('running dynamic evaluation ' + args.mode)
loss = evaluate_op()
print('perplexity loss: ' + str(loss[0]))
print('dynamic evaluation time: %d' % (time.time() - start_time))
log_results('Args: {}'.format(args))
log_results(args.mode + ' perplexity loss: ' + str(loss[0]))
log_csv(str(loss[0]))
| 14,131 | 32.251765 | 130 | py |
gradual-learning-rnn | gradual-learning-rnn-master/pytorch_impl/generate.py | ###############################################################################
# Language Modeling on Penn Tree Bank
#
# This file generates new sentences sampled from the language model
#
###############################################################################
import argparse
import torch
from torch.autograd import Variable
import data
parser = argparse.ArgumentParser(description='PyTorch PTB Language Model')
# Model parameters.
parser.add_argument('--data', type=str, default='./penn',
                    help='location of the data corpus')
parser.add_argument('--checkpoint', type=str, default='./model.pt',
                    help='model checkpoint to use')
parser.add_argument('--outf', type=str, default='generated.txt',
                    help='output file for generated text')
parser.add_argument('--words', type=int, default='1000',
                    help='number of words to generate')
parser.add_argument('--seed', type=int, default=1111,
                    help='random seed')
parser.add_argument('--cuda', action='store_true',
                    help='use CUDA')
parser.add_argument('--temperature', type=float, default=1.0,
                    help='temperature - higher will increase diversity')
parser.add_argument('--log-interval', type=int, default=100,
                    help='reporting interval')
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    if not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
    else:
        torch.cuda.manual_seed(args.seed)
if args.temperature < 1e-3:
    parser.error("--temperature has to be greater or equal 1e-3")
with open(args.checkpoint, 'rb') as f:
    model = torch.load(f)
model.eval()
if args.cuda:
    model.cuda()
else:
    model.cpu()
corpus = data.Corpus(args.data)
ntokens = len(corpus.dictionary)
hidden = model.init_hidden(1)
# seed token: one uniformly random word id; volatile disables autograd (legacy API)
input = Variable(torch.rand(1, 1).mul(ntokens).long(), volatile=True)
if args.cuda:
    input.data = input.data.cuda()
with open(args.outf, 'w') as outf:
    for i in range(args.words):
        # return_prob=True makes the model emit normalized probabilities
        output, hidden = model(input, hidden, return_prob=True)
        # sampling weights: exp(prob / temperature); higher T flattens them
        word_weights = output.squeeze().data.div(args.temperature).exp().cpu()
        word_idx = torch.multinomial(word_weights, 1)[0]
        # feed the sampled word back in as the next input
        input.data.fill_(word_idx)
        word = corpus.dictionary.idx2word[word_idx]
        # wrap the generated text at 20 words per line
        outf.write(word + ('\n' if i % 20 == 19 else ' '))
        if i % args.log_interval == 0:
            print('| Generated {}/{} words'.format(i, args.words))
| 2,610 | 33.813333 | 88 | py |
gradual-learning-rnn | gradual-learning-rnn-master/pytorch_impl/locked_dropout.py | import torch
import torch.nn as nn
from torch.autograd import Variable
class LockedDropout(nn.Module):
    """Variational ("locked") dropout: samples ONE Bernoulli mask per
    (batch, feature) slice and reuses it across every time step, instead of
    redrawing a mask per element like standard dropout."""

    def __init__(self):
        super().__init__()

    def forward(self, x, dropout=0.5):
        # identity at eval time or when the rate is zero/falsy
        if not self.training or not dropout:
            return x
        keep = 1 - dropout
        # one mask over (1, batch, features), broadcast along the time axis
        noise = x.data.new(1, x.size(1), x.size(2)).bernoulli_(keep)
        # inverted-dropout scaling so expectations match at eval time
        scaled = Variable(noise.div_(keep), requires_grad=False)
        return scaled.expand_as(x) * x
| 522 | 29.764706 | 71 | py |
gradual-learning-rnn | gradual-learning-rnn-master/pytorch_impl/utils.py | import os, shutil
import torch
from torch.autograd import Variable
def repackage_hidden(h):
    """Wraps hidden states in new Variables, to detach them from their history."""
    # NOTE(review): relies on the pre-0.4 PyTorch Variable API; on modern
    # PyTorch plain tensors never satisfy `type(h) == Variable`, so this
    # would recurse into the tuple branch -- confirm the target torch version.
    if type(h) == Variable:
        return Variable(h.data)
    else:
        return tuple(repackage_hidden(v) for v in h)
def batchify(data, bsz, args):
    """Reshape a 1-D token stream into `bsz` parallel columns of equal
    length, discarding the trailing remainder; moves the result to the GPU
    when args.cuda is set. Returns a (nbatch, bsz) contiguous tensor."""
    usable = (data.size(0) // bsz) * bsz
    # lay the stream out column-wise: one contiguous column per batch element
    cols = data.narrow(0, 0, usable).view(bsz, -1).t().contiguous()
    print(cols.size())
    if args.cuda:
        cols = cols.cuda()
    return cols
def get_batch(source, i, args, seq_len=None, evaluation=False):
    """Return (data, target) chunks of length `seq_len` (default args.bptt)
    starting at row `i`; target is the input shifted by one token.

    Unlike the classic AWD-LSTM helper, target KEEPS its (seq_len, batch)
    shape -- callers flatten it themselves.
    """
    seq_len = min(seq_len if seq_len else args.bptt, len(source) - 1 - i)
    # volatile=True disables autograd history (legacy pre-0.4 API)
    data = Variable(source[i:i+seq_len], volatile=evaluation)
    # target = Variable(source[i+1:i+1+seq_len].view(-1))
    target = Variable(source[i+1:i+1+seq_len])
    return data, target
def create_exp_dir(path, scripts_to_save=None):
    """Create the experiment directory `path` and optionally snapshot the
    given script files into <path>/scripts/ for reproducibility.

    Args:
        path: experiment directory (missing parents are created).
        scripts_to_save: optional iterable of file paths to copy.
    """
    # makedirs creates missing parents and exist_ok keeps re-runs from
    # crashing (os.mkdir failed when a parent was missing or when the
    # scripts/ directory already existed from a previous run).
    os.makedirs(path, exist_ok=True)
    print('Experiment dir : {}'.format(path))
    if scripts_to_save is not None:
        scripts_dir = os.path.join(path, 'scripts')
        os.makedirs(scripts_dir, exist_ok=True)
        for script in scripts_to_save:
            dst_file = os.path.join(scripts_dir, os.path.basename(script))
            shutil.copyfile(script, dst_file)
def save_checkpoint(model, optimizer, path, finetune=False):
    """Serialize the whole model object and the optimizer state dict into
    `path`; finetune=True prefixes the filenames with 'finetune_'."""
    prefix = 'finetune_' if finetune else ''
    torch.save(model, os.path.join(path, prefix + 'model.pt'))
    torch.save(optimizer.state_dict(), os.path.join(path, prefix + 'optimizer.pt'))
| 1,845 | 36.673469 | 87 | py |
gradual-learning-rnn | gradual-learning-rnn-master/pytorch_impl/model.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from embed_regularize import embedded_dropout
from locked_dropout import LockedDropout
from weight_drop import WeightDrop
class RNNModel(nn.Module):
    """Container module with an encoder, a recurrent module, and a decoder.

    Mixture-of-Softmaxes (MoS) LSTM language model: a stack of single-layer
    LSTMs with AWD-LSTM-style dropout, followed by an n_experts mixture of
    softmaxes over the vocabulary.
    """
    def __init__(self, rnn_type, ntoken, ninp, nhid, nhidlast, nlayers,
                 dropout=0.5, dropouth=0.5, dropouti=0.5, dropoute=0.1, wdrop=0,
                 tie_weights=False, ldropout=0.5, n_experts=10):
        """ntoken: vocab size; ninp: embedding size; nhid: width of all but
        the last LSTM; nhidlast: width of the last LSTM; nlayers: LSTM depth.
        dropout/dropouth/dropouti/dropoute/ldropout: output / between-layer /
        input / embedding / latent dropout rates (dropouth may be a per-layer
        list); wdrop: recurrent weight dropout; tie_weights: share decoder
        and encoder weights; n_experts: number of softmax experts.
        """
        super(RNNModel, self).__init__()
        self.lockdrop = LockedDropout()
        self.encoder = nn.Embedding(ntoken, ninp)
        # one single-layer LSTM per depth level; the last level may use a
        # different width (nhidlast) feeding the MoS latent projection
        self.rnns = [torch.nn.LSTM(ninp if l == 0 else nhid, nhid if l != nlayers - 1 else nhidlast, 1, dropout=0) for l in range(nlayers)]
        if wdrop:
            self.rnns = [WeightDrop(rnn, ['weight_hh_l0'], dropout=wdrop) for rnn in self.rnns]
        self.rnns = torch.nn.ModuleList(self.rnns)
        # mixture weights over the n_experts softmaxes
        self.prior = nn.Linear(nhidlast, n_experts, bias=False)
        # projects the final hidden state to n_experts embedding-space vectors
        self.latent = nn.Sequential(nn.Linear(nhidlast, n_experts*ninp), nn.Tanh())
        self.decoder = nn.Linear(ninp, ntoken)
        # Optionally tie weights as in:
        # "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
        # https://arxiv.org/abs/1608.05859
        # and
        # "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
        # https://arxiv.org/abs/1611.01462
        if tie_weights:
            #if nhid != ninp:
            #    raise ValueError('When using the tied flag, nhid must be equal to emsize')
            self.decoder.weight = self.encoder.weight
        self.init_weights()
        self.rnn_type = rnn_type
        self.ninp = ninp
        self.nhid = nhid
        self.nhidlast = nhidlast
        self.nlayers = nlayers
        self.dropout = dropout
        self.dropouti = dropouti
        self.dropoute = dropoute
        self.dropouth = dropouth
        self.ldropout = ldropout
        self.dropoutl = ldropout
        self.n_experts = n_experts
        self.ntoken = ntoken
        size = 0
        for p in self.parameters():
            size += p.nelement()
        print('param size: {}'.format(size))
    def init_weights(self):
        """Uniform(-0.1, 0.1) init for encoder/decoder weights, zero decoder bias."""
        initrange = 0.1
        self.encoder.weight.data.uniform_(-initrange, initrange)
        self.decoder.bias.data.fill_(0)
        self.decoder.weight.data.uniform_(-initrange, initrange)
    def forward(self, input, hidden, return_h=False, return_prob=False):
        """Run the model on `input` (seq_len, batch) with per-layer hidden states.

        Returns log-probabilities (or probabilities if return_prob) of shape
        (seq_len, batch, ntoken) plus the new hidden states; with return_h
        also the raw/dropped per-layer outputs used by the activation
        regularizers.
        """
        batch_size = input.size(1)
        emb = embedded_dropout(self.encoder, input, dropout=self.dropoute if self.training else 0)
        #emb = self.idrop(emb)
        emb = self.lockdrop(emb, self.dropouti)
        raw_output = emb
        new_hidden = []
        #raw_output, hidden = self.rnn(emb, hidden)
        raw_outputs = []
        outputs = []
        for l, rnn in enumerate(self.rnns):
            current_input = raw_output
            raw_output, new_h = rnn(raw_output, hidden[l])
            new_hidden.append(new_h)
            raw_outputs.append(raw_output)
            if l != self.nlayers - 1:
                #self.hdrop(raw_output)
                # between-layer (variational) dropout, per-layer rate
                raw_output = self.lockdrop(raw_output, self.dropouth[l])
                outputs.append(raw_output)
        hidden = new_hidden
        output = self.lockdrop(raw_output, self.dropout)
        outputs.append(output)
        latent = self.latent(output)
        latent = self.lockdrop(latent, self.dropoutl)
        # per-expert logits over the vocabulary
        logit = self.decoder(latent.view(-1, self.ninp))
        prior_logit = self.prior(output).contiguous().view(-1, self.n_experts)
        # legacy dim-less softmax: applies over the last dim of these 2-D inputs
        prior = nn.functional.softmax(prior_logit)
        prob = nn.functional.softmax(logit.view(-1, self.ntoken)).view(-1, self.n_experts, self.ntoken)
        # mixture of softmaxes: weight each expert's distribution by the prior
        prob = (prob * prior.unsqueeze(2).expand_as(prob)).sum(1)
        if return_prob:
            model_output = prob
        else:
            # epsilon guards log(0); note add_ mutates prob in place
            log_prob = torch.log(prob.add_(1e-8))
            model_output = log_prob
        model_output = model_output.view(-1, batch_size, self.ntoken)
        if return_h:
            return model_output, hidden, raw_outputs, outputs
        return model_output, hidden
    def init_hidden(self, bsz):
        """Return zeroed (h, c) pairs for each layer, sized per-layer width."""
        weight = next(self.parameters()).data
        return [(Variable(weight.new(1, bsz, self.nhid if l != self.nlayers - 1 else self.nhidlast).zero_()),
                 Variable(weight.new(1, bsz, self.nhid if l != self.nlayers - 1 else self.nhidlast).zero_()))
                for l in range(self.nlayers)]
if __name__ == '__main__':
    # Smoke test: build a tiny 2-layer model and run one forward pass.
    model = RNNModel('LSTM', 10, 12, 12, 12, 2)
    input = Variable(torch.LongTensor(13, 9).random_(0, 10))
    hidden = model.init_hidden(9)
    model(input, hidden)
| 4,848 | 36.3 | 139 | py |
gradual-learning-rnn | gradual-learning-rnn-master/pytorch_impl/data.py | import os
import torch
from collections import Counter
class Dictionary(object):
    """Bidirectional word<->id mapping with per-id occurrence counts."""

    def __init__(self):
        self.word2idx = {}        # word -> integer id
        self.idx2word = []        # id -> word
        self.counter = Counter()  # id -> occurrence count
        self.total = 0            # total tokens counted via add_word

    def add_word(self, word):
        """Register `word` if unseen, record one occurrence, return its id."""
        idx = self.word2idx.get(word)
        if idx is None:
            self.idx2word.append(word)
            idx = len(self.idx2word) - 1
            self.word2idx[word] = idx
        self.counter[idx] += 1
        self.total += 1
        return idx

    def __len__(self):
        return len(self.idx2word)
class Corpus(object):
    """Word-level corpus: builds one Dictionary over the three splits and
    tokenizes each split into a flat LongTensor of word ids.

    `path` is a filename prefix: <path>.train.txt / .valid.txt / .test.txt.
    """
    def __init__(self, path):
        self.dictionary = Dictionary()
        self.train = self.tokenize(path + '.train.txt')
        self.valid = self.tokenize(path + '.valid.txt')
        self.test = self.tokenize(path + '.test.txt')
    def tokenize(self, path):
        """Tokenizes a text file."""
        print(path)
        assert os.path.exists(path)
        # Add words to the dictionary
        # first pass: register vocabulary and count the total token budget
        with open(path, 'r', encoding='utf-8') as f:
            tokens = 0
            for line in f:
                # every line is terminated by an explicit <eos> token
                words = line.split() + ['<eos>']
                tokens += len(words)
                for word in words:
                    self.dictionary.add_word(word)
        # Tokenize file content
        # second pass: fill a pre-sized id tensor
        with open(path, 'r', encoding='utf-8') as f:
            ids = torch.LongTensor(tokens)
            token = 0
            for line in f:
                words = line.split() + ['<eos>']
                for word in words:
                    ids[token] = self.dictionary.word2idx[word]
                    token += 1
        return ids
class SentCorpus(object):
    """Like Corpus, but keeps each line as its own LongTensor of word ids."""

    def __init__(self, path):
        self.dictionary = Dictionary()
        self.train = self.tokenize(os.path.join(path, 'train.txt'))
        self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
        self.test = self.tokenize(os.path.join(path, 'test.txt'))

    def tokenize(self, path):
        """Tokenizes a text file."""
        assert os.path.exists(path)
        # Pass 1: populate the dictionary (and count tokens, matching Corpus).
        with open(path, 'r', encoding='utf-8') as f:
            tokens = 0
            for line in f:
                words = line.split() + ['<eos>']
                tokens += len(words)
                for word in words:
                    self.dictionary.add_word(word)
        # Pass 2: one id tensor per (non-empty) line.
        sents = []
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                if not line:
                    continue
                words = line.split() + ['<eos>']
                sent = torch.LongTensor(len(words))
                for pos, word in enumerate(words):
                    sent[pos] = self.dictionary.word2idx[word]
                sents.append(sent)
        return sents
class BatchSentLoader(object):
    """Iterates length-sorted sentences as padded (max_len, batch) LongTensor batches."""

    def __init__(self, sents, batch_size, pad_id=0, cuda=False, volatile=False):
        self.sents = sents
        self.batch_size = batch_size
        # Sorting by length keeps padding inside each batch minimal.
        self.sort_sents = sorted(sents, key=lambda x: x.size(0))
        self.cuda = cuda
        self.volatile = volatile
        self.pad_id = pad_id

    def __next__(self):
        if self.idx >= len(self.sort_sents):
            raise StopIteration
        take = min(self.batch_size, len(self.sort_sents) - self.idx)
        batch = self.sort_sents[self.idx:self.idx + take]
        longest = max(s.size(0) for s in batch)
        # Shape (longest, take), padded with pad_id; one sentence per column.
        tensor = torch.LongTensor(longest, take).fill_(self.pad_id)
        for col, sent in enumerate(batch):
            tensor[:sent.size(0), col].copy_(sent)
        if self.cuda:
            tensor = tensor.cuda()
        self.idx += take
        return tensor

    next = __next__

    def __iter__(self):
        self.idx = 0
        return self
if __name__ == '__main__':
    # Smoke test: load the PTB test split and print each batch's shape.
    corpus = SentCorpus('../penn')
    loader = BatchSentLoader(corpus.test, 10)
    for batch_idx, batch in enumerate(loader):
        print(batch_idx, batch.size())
| 3,989 | 29.692308 | 80 | py |
gradual-learning-rnn | gradual-learning-rnn-master/pytorch_impl/finetune.py | import argparse
import ast
import time
import math
import numpy as np
np.random.seed(331)
import torch
import torch.nn as nn
import data
import model
import os
from utils import batchify, get_batch, repackage_hidden, save_checkpoint
parser = argparse.ArgumentParser(description='PyTorch PennTreeBank/WikiText2 RNN/LSTM Language Model')
parser.add_argument('--data', type=str, default='./data/penn/',
help='location of the data corpus')
parser.add_argument('--model', type=str, default='LSTM',
help='type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU)')
parser.add_argument('--emsize', type=int, default=280,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=960,
help='number of hidden units per layer')
parser.add_argument('--nhidlast', type=int, default=960,
help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=3,
help='number of layers')
parser.add_argument('--lr', type=float, default=25,
help='initial learning rate')
parser.add_argument('--clip', type=str, default='[0.25,0.1,0.15,0.15]',
help='maximum gradient norm, given as a list with value for each layer. '
'for lwgc reffer to the structure: [emb, L0, L1, ..., Ln]')
parser.add_argument('--epochs', type=int, default=250,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=12, metavar='N',
help='batch size')
parser.add_argument('--bptt', type=int, default=70,
help='sequence length')
parser.add_argument('--dropout', type=float, default=0.4,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--dropouth', type=str, default='[0.225]',
help='dropout for rnn layers (0 = no dropout), given as a list with value for each layer')
parser.add_argument('--dropouti', type=float, default=0.65,
help='dropout for input embedding layers (0 = no dropout)')
parser.add_argument('--dropoute', type=float, default=0.1,
help='dropout to remove words from embedding layer (0 = no dropout)')
parser.add_argument('--dropoutl', type=float, default=-0.2,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--wdrop', type=float, default=0.5,
help='amount of weight dropout to apply to the RNN hidden to hidden matrix')
parser.add_argument('--tied', action='store_false',
help='tie the word embedding and softmax weights')
parser.add_argument('--seed', type=int, default=28,
help='random seed')
parser.add_argument('--nonmono', type=int, default=5,
help='random seed')
parser.add_argument('--cuda', action='store_false',
help='use CUDA')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
help='report interval')
parser.add_argument('--save', type=str, required=False, default='./GL/L0/RUN-0/',
help='path to the directory that save the final model')
parser.add_argument('--alpha', type=float, default=2,
help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)')
parser.add_argument('--beta', type=float, default=1,
help='beta slowness regularization applied on RNN activiation (beta = 0 means no regularization)')
parser.add_argument('--wdecay', type=float, default=1.2e-6,
help='weight decay applied to all weights')
parser.add_argument('--continue_train', action='store_true',
help='continue train from a checkpoint')
parser.add_argument('--n_experts', type=int, default=15,
help='number of experts')
parser.add_argument('--small_batch_size', type=int, default=-1,
help='the batch size for computation. batch_size should be divisible by small_batch_size.\
In our implementation, we compute gradients with small_batch_size multiple times, and accumulate the gradients\
until batch_size is reached. An update step is then performed.')
parser.add_argument('--max_seq_len_delta', type=int, default=40,
help='max sequence length')
parser.add_argument('--lwgc', default=True, help='use layer-wise gradient clipping')
parser.add_argument('--single_gpu', action='store_true', help='use single GPU')
parser.add_argument('--gpu', type=str, default=None,
help='set gpu device ID (-1 for cpu)')
args = parser.parse_args()
# Device selection: passing --gpu forces CUDA and pins the visible device;
# otherwise we fall back to CPU regardless of the --cuda flag's default.
if args.gpu is not None:
    args.cuda = True
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
else:
    #to run on cpu, model must have been trained on cpu
    args.cuda=False
# Per-layer dropout values arrive as a Python-literal string, e.g. '[0.225]'.
args.dropouth = ast.literal_eval(args.dropouth)
print('finetune load path: {}/model.pt. '.format(args.save))
print('log save path: {}/finetune_log.txt'.format(args.save))
print('model save path: {}/finetune_model.pt'.format(args.save))
log_file = os.path.join(args.save, 'finetune_log.txt')
# --clip is also a literal list; without layer-wise clipping only the first
# entry (the global norm) is used.
args.clip = ast.literal_eval(args.clip)
if not args.lwgc:
    args.clip = args.clip[0]
# Fresh (non-resumed) runs start with an empty log file.
if not args.continue_train:
    if os.path.exists(log_file):
        os.remove(log_file)
def logging(s, print_=True, log_=True):
    """Emit `s` to stdout and/or append it to the run's `log_file`."""
    if print_:
        print(s)
    if not log_:
        return
    with open(log_file, 'a+') as handle:
        handle.write(s + '\n')
# Resolve sentinel defaults: negative --dropoutl mirrors --dropouth, and a
# negative --small_batch_size disables gradient accumulation.
if args.dropoutl < 0:
    args.dropoutl = args.dropouth
if args.small_batch_size < 0:
    args.small_batch_size = args.batch_size
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    if not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
    else:
        torch.cuda.manual_seed_all(args.seed)
###############################################################################
# Load data
###############################################################################
corpus = data.Corpus(args.data)
eval_batch_size = 10
test_batch_size = 1
train_data = batchify(corpus.train, args.batch_size, args)
val_data = batchify(corpus.valid, eval_batch_size, args)
test_data = batchify(corpus.test, test_batch_size, args)
###############################################################################
# Build the model
###############################################################################
ntokens = len(corpus.dictionary)
# Fine-tuning starts from a previously trained checkpoint; --continue_train
# resumes from an earlier fine-tune checkpoint instead.
if args.continue_train:
    model = torch.load(os.path.join(args.save, 'finetune_model.pt'))
else:
    model = torch.load(os.path.join(args.save, 'model.pt'))
# Wrap for multi-GPU only when CUDA is on and more than one GPU is requested.
if args.cuda:
    if args.single_gpu:
        parallel_model = model.cuda()
    else:
        parallel_model = nn.DataParallel(model, dim=1).cuda()
else:
    parallel_model = model
total_params = sum(x.size()[0] * x.size()[1] if len(x.size()) > 1 else x.size()[0] for x in model.parameters())
logging('Args: {}'.format(args))
logging('Model total parameters: {}'.format(total_params))
criterion = nn.CrossEntropyLoss()
###############################################################################
# Training code
###############################################################################
def evaluate(data_source, batch_size=10):
    """Return average per-token NLL over `data_source` with dropout disabled."""
    model.eval()
    accumulated = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for start in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, start, args, evaluation=True)
        flat_targets = targets.view(-1)
        log_prob, hidden = parallel_model(data, hidden)
        step_loss = nn.functional.nll_loss(
            log_prob.view(-1, log_prob.size(2)), flat_targets).data
        # Weight each chunk's mean loss by its length before averaging.
        accumulated += len(data) * step_loss
        hidden = repackage_hidden(hidden)
    return accumulated[0] / len(data_source)
def train():
    """Run one training epoch over `train_data`.

    Uses randomized BPTT lengths, accumulates gradients over
    `batch_size // small_batch_size` sub-batches before each optimizer step,
    and applies either global or layer-wise gradient clipping (LWGC).
    Reads module-level state: model, parallel_model, optimizer, args, epoch.
    """
    assert args.batch_size % args.small_batch_size == 0, 'batch_size must be divisible by small_batch_size'
    # Turn on training mode which enables dropout.
    total_loss = 0
    start_time = time.time()
    ntokens = len(corpus.dictionary)
    # One hidden-state bundle per accumulation sub-batch.
    hidden = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)]
    batch, i = 0, 0
    while i < train_data.size(0) - 1 - 1:
        bptt = args.bptt if np.random.random() < 0.95 else args.bptt / 2.
        # Prevent excessively small or negative sequence lengths
        seq_len = max(5, int(np.random.normal(bptt, 5)))
        # There's a very small chance that it could select a very long sequence length resulting in OOM
        seq_len = min(seq_len, args.bptt + args.max_seq_len_delta)
        # Scale the learning rate by the sampled sequence length; restored below.
        lr2 = optimizer.param_groups[0]['lr']
        optimizer.param_groups[0]['lr'] = lr2 * seq_len / args.bptt
        model.train()
        data, targets = get_batch(train_data, i, args, seq_len=seq_len)
        optimizer.zero_grad()
        start, end, s_id = 0, args.small_batch_size, 0
        while start < args.batch_size:
            cur_data, cur_targets = data[:, start: end], targets[:, start: end].contiguous().view(-1)
            # Starting each batch, we detach the hidden state from how it was previously produced.
            # If we didn't, the model would try backpropagating all the way to start of the dataset.
            hidden[s_id] = repackage_hidden(hidden[s_id])
            log_prob, hidden[s_id], rnn_hs, dropped_rnn_hs = parallel_model(cur_data, hidden[s_id], return_h=True)
            raw_loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), cur_targets)
            loss = raw_loss
            # Activation Regularization - AR
            loss = loss + sum(args.alpha * dropped_rnn_h.pow(2).mean() for dropped_rnn_h in dropped_rnn_hs[-1:])
            # Temporal Activation Regularization (slowness) - TAR
            loss = loss + sum(args.beta * (rnn_h[1:] - rnn_h[:-1]).pow(2).mean() for rnn_h in rnn_hs[-1:])
            # Scale so the accumulated gradients match a full-batch update.
            loss *= args.small_batch_size / args.batch_size
            total_loss += raw_loss.data * args.small_batch_size / args.batch_size
            loss.backward()
            s_id += 1
            start = end
            end = start + args.small_batch_size
        # gradient norm clipping or LWGC -
        # helps prevent the exploding gradient problem in RNNs and reduce covariate shift.
        if args.lwgc:
            plist = []
            for param in model.parameters():
                plist.append(param)
            # embeddings clip (first parameter plus the tied decoder weight)
            emb_params = [plist[0], plist[-1]]
            torch.nn.utils.clip_grad_norm(emb_params, args.clip[0])
            # layers clip (each RNN layer contributes 4 consecutive parameters)
            for idx in range(1,4*args.nlayers,4):
                l_params = plist[idx:idx+4]
                torch.nn.utils.clip_grad_norm(l_params, args.clip[1+idx//4])
            # MoS clip
            mos_params = plist[-4:-1]
            torch.nn.utils.clip_grad_norm(mos_params, args.clip[-1])
        else:
            torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
        optimizer.step()
        # total_loss += raw_loss.data
        # Restore the un-scaled learning rate.
        optimizer.param_groups[0]['lr'] = lr2
        if batch % args.log_interval == 0 and batch > 0:
            cur_loss = total_loss[0] / args.log_interval
            elapsed = time.time() - start_time
            logging('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
                    'loss {:5.2f} | ppl {:8.2f}'.format(
                epoch, batch, len(train_data) // args.bptt, optimizer.param_groups[0]['lr'],
                elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
            total_loss = 0
            start_time = time.time()
        ###
        batch += 1
        i += seq_len
lr = args.lr
# Baseline validation loss of the loaded checkpoint; only improvements on the
# ASGD-averaged weights are saved.
stored_loss = evaluate(val_data)
best_val_loss = []
# At any point you can hit Ctrl + C to break out of training early.
try:
    #optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
    optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
    if args.continue_train:
        optimizer_state = torch.load(os.path.join(args.save, 'finetune_optimizer.pt'))
        optimizer.load_state_dict(optimizer_state)
    # Loop over epochs.
    for epoch in range(1, args.epochs+1):
        epoch_start_time = time.time()
        train()
        if 't0' in optimizer.param_groups[0]:
            # Temporarily swap the model weights for ASGD's running averages
            # ('ax') to validate the averaged model, then restore them.
            tmp = {}
            for prm in model.parameters():
                tmp[prm] = prm.data.clone()
                prm.data = optimizer.state[prm]['ax'].clone()
            val_loss2 = evaluate(val_data)
            logging('-' * 89)
            logging('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
                'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
                                       val_loss2, math.exp(val_loss2)))
            logging('-' * 89)
            if val_loss2 < stored_loss:
                save_checkpoint(model, optimizer, args.save, finetune=True)
                logging('Saving Averaged!')
                stored_loss = val_loss2
            for prm in model.parameters():
                prm.data = tmp[prm].clone()
            # Non-monotone stopping: quit once validation loss stops improving
            # over the last `nonmono` epochs.
            if (len(best_val_loss)>args.nonmono and val_loss2 > min(best_val_loss[:-args.nonmono])):
                logging('Done!')
                break
            # Restart ASGD averaging each epoch.
            optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
            #optimizer.param_groups[0]['lr'] /= 2.
            best_val_loss.append(val_loss2)
    # setting loop breaking at convergence saturation
except KeyboardInterrupt:
    logging('-' * 89)
    logging('Exiting from training early')
# Load the best saved model.
model = torch.load(os.path.join(args.save, 'finetune_model.pt'))
# Mirror the device setup used during training: keep the model on CPU when
# args.cuda is False, and honor --single_gpu. The previous code called
# nn.DataParallel(model, dim=1).cuda() unconditionally, which crashed on
# CPU-only runs (--gpu unset forces args.cuda = False).
if args.cuda:
    if args.single_gpu:
        parallel_model = model.cuda()
    else:
        parallel_model = nn.DataParallel(model, dim=1).cuda()
else:
    parallel_model = model
# Run on test data.
test_loss = evaluate(test_data, test_batch_size)
logging('=' * 89)
logging('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
    test_loss, math.exp(test_loss)))
logging('=' * 89)
| 14,244 | 42.696319 | 132 | py |
gradual-learning-rnn | gradual-learning-rnn-master/pytorch_impl/weight_drop.py | import torch
from torch.nn import Parameter
from functools import wraps
class WeightDrop(torch.nn.Module):
    """Applies DropConnect-style dropout to named weights of a wrapped module.

    Each listed weight is re-registered as `<name>_raw`; on every forward pass
    a dropout mask is applied to the raw weight and the result is installed
    back under the original attribute name before delegating to the module.
    """

    def __init__(self, module, weights, dropout=0, variational=False):
        super(WeightDrop, self).__init__()
        self.module = module
        self.weights = weights
        self.dropout = dropout
        self.variational = variational
        self._setup()

    def widget_demagnetizer_y2k_edition(*args, **kwargs):
        # We need to replace flatten_parameters with a nothing function
        # It must be a function rather than a lambda as otherwise pickling explodes
        # We can't write boring code though, so ... WIDGET DEMAGNETIZER Y2K EDITION!
        # (╯°□°)╯︵ ┻━┻
        return

    def _setup(self):
        # Terrible temporary solution to an issue regarding compacting weights re: CUDNN RNN
        if issubclass(type(self.module), torch.nn.RNNBase):
            self.module.flatten_parameters = self.widget_demagnetizer_y2k_edition
        # Move each targeted weight out of the module's parameter dict and
        # re-register it under a '_raw' suffix so dropout can rebuild it.
        for weight_name in self.weights:
            print('Applying weight drop of {} to {}'.format(self.dropout, weight_name))
            original = getattr(self.module, weight_name)
            del self.module._parameters[weight_name]
            self.module.register_parameter(weight_name + '_raw', Parameter(original.data))

    def _setweights(self):
        # Recompute every dropped weight from its raw counterpart.
        for weight_name in self.weights:
            raw_weight = getattr(self.module, weight_name + '_raw')
            if self.variational:
                # Variational flavor: one mask entry per row, shared across it.
                keep_mask = torch.autograd.Variable(torch.ones(raw_weight.size(0), 1))
                if raw_weight.is_cuda:
                    keep_mask = keep_mask.cuda()
                keep_mask = torch.nn.functional.dropout(
                    keep_mask, p=self.dropout, training=True)
                dropped = keep_mask.expand_as(raw_weight) * raw_weight
            else:
                dropped = torch.nn.functional.dropout(
                    raw_weight, p=self.dropout, training=self.training)
            setattr(self.module, weight_name, dropped)

    def forward(self, *args):
        self._setweights()
        return self.module.forward(*args)
if __name__ == '__main__':
    # NOTE(review): this self-test requires a CUDA device (everything is moved
    # to GPU below); it will raise on CPU-only machines.
    import torch
    from weight_drop import WeightDrop
    # Input is (seq, batch, input)
    x = torch.autograd.Variable(torch.randn(2, 1, 10)).cuda()
    h0 = None
    ###
    print('Testing WeightDrop')
    print('=-=-=-=-=-=-=-=-=-=')
    ###
    print('Testing WeightDrop with Linear')
    lin = WeightDrop(torch.nn.Linear(10, 10), ['weight'], dropout=0.9)
    lin.cuda()
    # Two forward passes draw two independent dropout masks, so outputs differ.
    run1 = [x.sum() for x in lin(x).data]
    run2 = [x.sum() for x in lin(x).data]
    print('All items should be different')
    print('Run 1:', run1)
    print('Run 2:', run2)
    assert run1[0] != run2[0]
    assert run1[1] != run2[1]
    print('---')
    ###
    print('Testing WeightDrop with LSTM')
    # Dropping the hidden-to-hidden matrix only affects steps after the first.
    wdrnn = WeightDrop(torch.nn.LSTM(10, 10), ['weight_hh_l0'], dropout=0.9)
    wdrnn.cuda()
    run1 = [x.sum() for x in wdrnn(x, h0)[0].data]
    run2 = [x.sum() for x in wdrnn(x, h0)[0].data]
    print('First timesteps should be equal, all others should differ')
    print('Run 1:', run1)
    print('Run 2:', run2)
    # First time step, not influenced by hidden to hidden weights, should be equal
    assert run1[0] == run2[0]
    # Second step should not
    assert run1[1] != run2[1]
    print('---')
gradual-learning-rnn | gradual-learning-rnn-master/pytorch_impl/embed_regularize.py | import numpy as np
import torch
from torch.autograd import Variable
def embedded_dropout(embed, words, dropout=0.1, scale=None):
  """Embedding lookup with word-level (whole-row) dropout.

  With probability `dropout`, an entire embedding row is zeroed for this
  forward pass; surviving rows are scaled by 1/(1 - dropout) so the
  expectation is unchanged. `scale`, if given, multiplies the (masked)
  embedding matrix before lookup.

  Args:
    embed: a torch.nn.Embedding module.
    words: LongTensor of word indices.
    dropout: float, probability of dropping each vocabulary row.
    scale: optional tensor broadcastable over the embedding matrix.

  Returns:
    Tensor of embeddings, shape words.shape + (embedding_dim,).
  """
  if dropout:
    mask = embed.weight.data.new().resize_((embed.weight.size(0), 1)).bernoulli_(1 - dropout).expand_as(embed.weight) / (1 - dropout)
    mask = Variable(mask)
    masked_embed_weight = mask * embed.weight
  else:
    masked_embed_weight = embed.weight
  if scale:
    masked_embed_weight = scale.expand_as(masked_embed_weight) * masked_embed_weight

  # `embed._backend.Embedding.apply` was removed in PyTorch >= 1.0; use the
  # public functional API instead. Note the old THNN backend used -1 as the
  # "no padding" sentinel, whereas F.embedding expects None (a negative index
  # would be interpreted as counting from the end of the vocabulary).
  X = torch.nn.functional.embedding(
      words, masked_embed_weight,
      embed.padding_idx, embed.max_norm, embed.norm_type,
      embed.scale_grad_by_freq, embed.sparse)
  return X
if __name__ == '__main__':
  # Smoke test: compare a plain embedding lookup with the dropped version.
  V = 50
  h = 4
  bptt = 10
  batch_size = 2

  embed = torch.nn.Embedding(V, h)

  # np.random.random_integers was deprecated in NumPy 1.11 and removed in
  # NumPy 2.0. randint's upper bound is exclusive, so high=V reproduces the
  # old inclusive high=V-1.
  words = np.random.randint(low=0, high=V, size=(batch_size, bptt))
  words = torch.LongTensor(words)
  words = Variable(words)

  origX = embed(words)
  X = embedded_dropout(embed, words)

  print(origX)
  print(X)
sequer | sequer-main/code/tensor2tensor/setup.py | """Install tensor2tensor."""
from setuptools import find_packages
from setuptools import setup
# Package metadata and dependency declaration for tensor2tensor 1.14.1.
setup(
    name='tensor2tensor',
    version='1.14.1',
    description='Tensor2Tensor',
    author='Google Inc.',
    author_email='no-reply@google.com',
    url='http://github.com/tensorflow/tensor2tensor',
    license='Apache 2.0',
    packages=find_packages(),
    # Non-Python files shipped inside the package (test fixtures, JS assets).
    package_data={
        'tensor2tensor.data_generators': ['test_data/*'],
        'tensor2tensor.data_generators.wikisum': ['test_data/*'],
        'tensor2tensor.visualization': [
            'attention.js', 'TransformerVisualization.ipynb'
        ],
    },
    # Command-line entry points installed into the user's PATH.
    scripts=[
        'tensor2tensor/bin/t2t-trainer',
        'tensor2tensor/bin/t2t-datagen',
        'tensor2tensor/bin/t2t-decoder',
        'tensor2tensor/bin/t2t-make-tf-configs',
        'tensor2tensor/bin/t2t-eval',
        'tensor2tensor/bin/t2t-exporter',
        'tensor2tensor/bin/t2t-query-server',
        'tensor2tensor/bin/t2t-insights-server',
        'tensor2tensor/bin/t2t-avg-all',
        'tensor2tensor/bin/t2t-bleu',
        'tensor2tensor/bin/t2t-translate-all',
    ],
    # Core runtime dependencies; TensorFlow itself is an extra (below) so
    # users can pick the CPU or GPU build.
    install_requires=[
        'bz2file',
        'dopamine-rl',
        'flask',
        'future',
        'gevent',
        'gin-config',
        'google-api-python-client',
        'gunicorn',
        'gym',
        'h5py',
        'kfac',
        'mesh-tensorflow',
        'numpy',
        'oauth2client',
        'opencv-python',
        'Pillow',
        'pypng',
        'requests',
        'scipy',
        'six',
        'sympy',
        'tensorflow-datasets',
        'tensorflow-gan',
        'tensorflow-probability==0.7.0',
        'tqdm',
    ],
    extras_require={
        'tensorflow': ['tensorflow>=1.14.0'],
        'tensorflow_gpu': ['tensorflow-gpu>=1.14.0'],
        'tensorflow-hub': ['tensorflow-hub>=0.1.1'],
        'tests': [
            'absl-py',
            # Needed to fix a Travis pytest error.
            # https://github.com/Julian/jsonschema/issues/449#issuecomment-411406525
            'attrs>=17.4.0',
            'pytest>=3.8.0',
            'mock',
            'pylint',
            'jupyter',
            'matplotlib',
            # Need atari extras for Travis tests, but because gym is already in
            # install_requires, pip skips the atari extras, so we instead do an
            # explicit pip install gym[atari] for the tests.
            # 'gym[atari]',
        ],
        'trax': [
            'jax',
            'jaxlib',
        ],
        'allen': ['Pillow==5.1.0', 'pandas==0.23.0'],
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
    ],
    dependency_links=[
        'git+https://github.com/tensorflow/cleverhans.git#egg=cleverhans'
    ],
    keywords='tensorflow machine learning',
)
| 3,012 | 29.434343 | 84 | py |
sequer | sequer-main/code/tensor2tensor/tensor2tensor/envs/env_problem_utils.py | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to deal with EnvProblem."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import time
import gym
import numpy as np
from tensor2tensor.envs import gym_env_problem
from tensor2tensor.envs import rendered_env_problem
from tensor2tensor.rl import gym_utils
def done_indices(dones):
  """Calculates the indices where dones has True."""
  flagged = np.argwhere(dones)
  # argwhere on a 1-D mask yields shape (k, 1); flatten to (k,).
  return flagged.squeeze(axis=1)
def play_env_problem_randomly(env_problem, num_steps):
  """Plays the env problem by randomly sampling actions for `num_steps`."""
  # Start every environment from a fresh state.
  env_problem.reset()
  for _ in range(num_steps):
    # One random action per environment in the batch.
    sampled = [env_problem.action_space.sample()
               for _ in range(env_problem.batch_size)]
    # Step all environments; observations are tracked inside `env_problem`.
    _, _, dones, _ = env_problem.step(np.stack(sampled))
    # Restart only the environments that just finished.
    env_problem.reset(indices=done_indices(dones))
def get_completed_trajectories_from_env(env,
                                        n_trajectories,
                                        raw_trajectory=False):
  """Returns completed `n_trajectories` from `env`."""
  selected = env.trajectories.completed_trajectories[:n_trajectories]
  if raw_trajectory:
    # Hand back the trajectory objects themselves.
    return selected
  # Otherwise convert each trajectory to its numpy representation.
  return [trajectory.as_numpy for trajectory in selected]
def play_env_problem_with_policy(env,
                                 policy_fun,
                                 num_trajectories=1,
                                 max_timestep=None,
                                 reset=True,
                                 state=None,
                                 rng=None,
                                 temperature=1.0,
                                 boundary=32,
                                 len_history_for_policy=32,
                                 num_to_keep=1,
                                 abort_fn=None,
                                 raw_trajectory=False):
  """Plays the given env with the policy function to collect trajectories.
  Args:
    env: environment object, should be a subclass of env_problem.EnvProblem.
    policy_fun: callable, taking in observations((B, RT) + OBS) and returning
      back log-probabilities (B, AT, A).
    num_trajectories: int, number of trajectories to collect.
    max_timestep: int or None, if not None or a negative number, we cut any
      trajectory that exceeds this time put it in the completed bin, and *dont*
      reset the env.
    reset: bool, true if we want to reset the envs. The envs are also reset if
      max_max_timestep is None or < 0.
    state: the state for `policy_fn`.
    rng: jax rng, splittable.
    temperature: float, temperature used in Gumbel sampling.
    boundary: int, pad the sequences to the multiples of this number.
    len_history_for_policy: int or None, the maximum history to keep for
      applying the policy on. If None, use the whole history.
    num_to_keep: int, while truncating trajectory how many time-steps to keep.
    abort_fn: callable, If not None, then at every step call and abort the
      trajectory collection if it returns True, if so reset the env and return
      None.
    raw_trajectory: bool, if True a list of trajectory.Trajectory objects is
      returned, otherwise a list of numpy representations of
      `trajectory.Trajectory` is returned.
  Returns:
    A tuple, (trajectories, number of completed trajectories). Where
    trajectories is a list of triples of (observation, action, reward) ndarrays.
  """
  def gumbel_sample(log_probs):
    """Gumbel sampling."""
    # Sampling via the Gumbel-max trick: argmax of (logits/T + Gumbel noise)
    # is a draw from the (temperature-scaled) categorical distribution.
    u = np.random.uniform(low=1e-6, high=1.0 - 1e-6, size=log_probs.shape)
    g = -np.log(-np.log(u))
    return np.argmax((log_probs / temperature) + g, axis=-1)
  # We need to reset all environments, if we're coming here the first time.
  if reset or max_timestep is None or max_timestep <= 0:
    env.reset()
  else:
    # Clear completed trajectories held internally.
    env.trajectories.clear_completed_trajectories()
  num_done_trajectories = 0
  # Timing accumulators (seconds), reported in milliseconds at the end.
  policy_application_total_time = 0
  env_actions_total_time = 0
  bare_env_run_time = 0
  while env.trajectories.num_completed_trajectories < num_trajectories:
    # Check if we should abort and return nothing.
    if abort_fn and abort_fn():
      # We should also reset the environment, since it will have some
      # trajectories (complete and incomplete) that we want to discard.
      env.reset()
      return None, 0, {}, state
    # Get all the observations for all the active trajectories.
    # Shape is (B, RT) + OBS
    # Bucket on whatever length is needed.
    padded_observations, lengths = env.trajectories.observations_np(
        boundary=boundary,
        len_history_for_policy=len_history_for_policy)
    B = padded_observations.shape[0]  # pylint: disable=invalid-name
    assert B == env.batch_size
    assert (B,) == lengths.shape
    t1 = time.time()
    log_probs, value_preds, state, rng = policy_fun(
        padded_observations, lengths, state=state, rng=rng)
    policy_application_total_time += (time.time() - t1)
    assert B == log_probs.shape[0]
    actions = gumbel_sample(log_probs)
    if isinstance(env.action_space, gym.spaces.Discrete):
      actions = np.squeeze(actions, axis=1)
    # Step through the env.
    t1 = time.time()
    _, _, dones, env_infos = env.step(
        actions,
        infos={
            "log_prob_actions": log_probs,
            "value_predictions": value_preds,
        })
    env_actions_total_time += (time.time() - t1)
    bare_env_run_time += sum(
        info["__bare_env_run_time__"] for info in env_infos)
    # Count the number of done trajectories, the others could just have been
    # truncated.
    num_done_trajectories += np.sum(dones)
    # Get the indices where we are done ...
    done_idxs = done_indices(dones)
    # ... and reset those.
    t1 = time.time()
    if done_idxs.size:
      env.reset(indices=done_idxs)
    env_actions_total_time += (time.time() - t1)
    if max_timestep is None or max_timestep < 1:
      continue
    # Are there any trajectories that have exceeded the time-limit we want.
    lengths = env.trajectories.trajectory_lengths
    exceeded_time_limit_idxs = done_indices(lengths > max_timestep)
    # If so, reset these as well.
    t1 = time.time()
    if exceeded_time_limit_idxs.size:
      # This just cuts the trajectory, doesn't reset the env, so it continues
      # from where it left off.
      env.truncate(indices=exceeded_time_limit_idxs, num_to_keep=num_to_keep)
    env_actions_total_time += (time.time() - t1)
  # We have the trajectories we need, return a list of triples:
  # (observations, actions, rewards)
  completed_trajectories = get_completed_trajectories_from_env(
      env, num_trajectories, raw_trajectory=raw_trajectory)
  timing_info = {
      "trajectory_collection/policy_application": policy_application_total_time,
      "trajectory_collection/env_actions": env_actions_total_time,
      "trajectory_collection/env_actions/bare_env": bare_env_run_time,
  }
  timing_info = {k: round(1000 * v, 2) for k, v in timing_info.items()}
  return completed_trajectories, num_done_trajectories, timing_info, state
def make_env(batch_size=1,
             env_problem_name="",
             resize=True,
             resize_dims=(105, 80),
             max_timestep="None",
             clip_rewards=True,
             parallelism=1,
             use_tpu=False,
             **env_kwargs):
  """Creates the env."""
  # Reward clipping implies discretized rewards in [-1, 1].
  if clip_rewards:
    env_kwargs.update({"reward_range": (-1, 1), "discrete_rewards": True})
  else:
    env_kwargs.update({"discrete_rewards": False})

  # No resizing needed, so let's be on the normal EnvProblem.
  if not resize:  # None or False
    return gym_env_problem.GymEnvProblem(
        base_env_name=env_problem_name,
        batch_size=batch_size,
        parallelism=parallelism,
        **env_kwargs)

  # `max_timestep` arrives as a string; anything non-integer means no limit.
  try:
    step_limit = int(max_timestep)
  except Exception:  # pylint: disable=broad-except
    step_limit = None

  env_wrapper = functools.partial(
      gym_utils.gym_env_wrapper, **{
          "rl_env_max_episode_steps": step_limit,
          "maxskip_env": True,
          "rendered_env": True,
          "rendered_env_resize_to": resize_dims,
          "sticky_actions": False,
          "output_dtype": np.int32 if use_tpu else None,
      })
  return rendered_env_problem.RenderedEnvProblem(
      base_env_name=env_problem_name,
      batch_size=batch_size,
      parallelism=parallelism,
      env_wrapper_fn=env_wrapper,
      **env_kwargs)
| 9,598 | 35.086466 | 80 | py |
sequer | sequer-main/code/tensor2tensor/tensor2tensor/layers/common_layers.py | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers common to multiple models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import functools
import math
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import inplace_ops
_cached_layers = None
# TODO(lukaszkaiser): remove this function when not needed any more.
def layers():
  """Get the layers module good for TF 1 and TF 2 work for now."""
  global _cached_layers
  if _cached_layers is None:
    # Default to the TF1 layers module; switch to Keras when running under TF2.
    chosen = tf.layers
    try:
      from tensorflow.python import tf2  # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
      if tf2.enabled():
        tf.logging.info("Running in V2 mode, using Keras layers.")
        chosen = tf.keras.layers
    except ImportError:
      pass
    _cached_layers = chosen
  return _cached_layers
@function.Defun(
    python_grad_func=lambda x, dy: tf.convert_to_tensor(dy),
    shape_func=lambda op: [op.inputs[0].get_shape()])
def convert_gradient_to_tensor(x):
  """Identity operation whose gradient is converted to a `Tensor`.
  Currently, the gradient to `tf.concat` is particularly expensive to
  compute if dy is an `IndexedSlices` (a lack of GPU implementation
  forces the gradient operation onto CPU). This situation occurs when
  the output of the `tf.concat` is eventually passed to `tf.gather`.
  It is sometimes faster to convert the gradient to a `Tensor`, so as
  to get the cheaper gradient for `tf.concat`. To do this, replace
  `tf.concat(x)` with `convert_gradient_to_tensor(tf.concat(x))`.
  Args:
    x: A `Tensor`.
  Returns:
    The input `Tensor`.
  """
  # Forward pass is the identity; the Defun's python_grad_func densifies the
  # incoming gradient (IndexedSlices -> Tensor) on the backward pass.
  return x
def is_xla_compiled():
  """Whether we are building graph that will be compiled by XLA.
  This checks whether the code is executing within an XLA context.
  If True, model authors should ensure the graph they build is compilable by
  XLA. Specifically, they should ensure that all ops have XLA implementations
  and that all shapes are statically known.
  Returns:
    bool, whether the current graph will be compiled for XLA.
  """
  graph = tf.get_default_graph()
  # pylint: disable=protected-access
  context = graph._get_control_flow_context()
  return control_flow_util.GetContainingXLAContext(context) is not None
def to_float(x):
  """Cast x to float32; replacement for the deprecated tf.to_float."""
  return tf.cast(x, dtype=tf.float32)
def dropout_with_broadcast_dims(x, keep_prob, broadcast_dims=None, **kwargs):
  """Like tf.nn.dropout but takes broadcast_dims instead of noise_shape.

  Instead of specifying noise_shape, this function takes broadcast_dims -
  a list of dimension numbers in which noise_shape should be 1. The random
  keep/drop tensor has dimensionality 1 along these dimensions, so the same
  decision is broadcast across them (which saves memory).

  Args:
    x: a floating point tensor.
    keep_prob: A scalar Tensor with the same type as x.
      The probability that each element is kept.
    broadcast_dims: an optional list of integers (negative values allowed)
      giving the dimensions along which to broadcast the keep/drop flags.
    **kwargs: keyword arguments to tf.nn.dropout other than "noise_shape".

  Returns:
    Tensor of the same shape as x.
  """
  assert "noise_shape" not in kwargs
  if broadcast_dims:
    ndims = len(x.get_shape())
    dyn_shape = tf.shape(x)
    # Normalize negative dimension indices such as -1.
    bcast = {d + ndims if d < 0 else d for d in broadcast_dims}
    kwargs["noise_shape"] = [
        1 if axis in bcast else dyn_shape[axis] for axis in range(ndims)
    ]
  return tf.nn.dropout(x, keep_prob, **kwargs)
def comma_separated_string_to_integer_list(s):
  """Parse a comma-separated string of integers into a list.

  Empty and whitespace-only segments are skipped, so "1,,2" and " 1 , 2 "
  both yield [1, 2].  (Previously a whitespace-only segment such as "1, ,2"
  crashed int() with a ValueError.)

  Args:
    s: a string such as "1,2,3"; may be empty.

  Returns:
    A list of ints.
  """
  return [int(part) for part in s.split(",") if part.strip()]
def saturating_sigmoid(x):
  """Saturating sigmoid: 1.2 * sigmoid(x) - 0.1 cut to [0, 1]."""
  with tf.name_scope("saturating_sigmoid", values=[x]):
    stretched = 1.2 * tf.sigmoid(x) - 0.1
    # Clip into [0, 1]; identical to minimum(1, maximum(0, stretched)).
    return tf.minimum(1.0, tf.maximum(0.0, stretched))
def hard_sigmoid(x, saturation_limit=0.9):
  """Hard sigmoid clipped to [0, 1], plus a cost penalizing saturation.

  Returns:
    A pair (min(1, relu(0.5*x + 0.5)), mean relu(|x| - saturation_limit)).
  """
  saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))
  shifted = 0.5 * x + 0.5
  clipped = tf.minimum(1.0, tf.nn.relu(shifted))
  return clipped, saturation_cost
def hard_tanh(x, saturation_limit=0.9):
  """Hard tanh clipped to [-1, 1], plus a cost penalizing saturation.

  Returns:
    A pair (clip(x, -1, 1), mean relu(|x| - saturation_limit)).
  """
  saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))
  clipped = tf.minimum(1.0, tf.maximum(x, -1.0))
  return clipped, saturation_cost
def inverse_exp_decay(max_step, min_value=0.01, step=None):
  """Inverse-decay exponentially from min_value to 1.0 reached at max_step."""
  if step is None:
    step = tf.train.get_global_step()
    if step is None:
      # No global step available: behave as fully decayed.
      return 1.0
  # Base chosen so that base**max_step == min_value.
  base = tf.exp(tf.log(min_value) / float(max_step))
  remaining = tf.maximum(float(max_step) - to_float(step), 0.0)
  return base**remaining
def inverse_lin_decay(max_step, min_value=0.01, step=None):
  """Inverse-decay linearly from min_value to 1.0 reached at max_step."""
  if step is None:
    step = tf.train.get_global_step()
  if step is None:
    # No global step available: behave as fully decayed.
    return 1.0
  fraction = tf.minimum(to_float(step) / float(max_step), 1.0)
  # Linear interpolation from min_value (fraction 0) to 1.0 (fraction 1).
  return min_value + (1.0 - min_value) * fraction
def inverse_sigmoid_decay(max_step, min_value=0.01, step=None):
  """Inverse-decay along a sigmoid curve from min_value to 1.0 at max_step.

  (Note: despite the older copy-pasted summary, this is a sigmoid, not a
  linear, schedule.)

  Args:
    max_step: step at which the output reaches 1.0.
    min_value: value at step 0; must satisfy 0 < min_value < 0.5.
    step: optional step Tensor; defaults to the global step. If neither is
      available, 1.0 is returned.

  Returns:
    A scalar in [min_value, 1.0].
  """
  if step is None:
    step = tf.train.get_global_step()
  if step is None:
    return 1.0
  step = to_float(step)
  def sigmoid(x):
    return 1 / (1 + tf.exp(-x))
  def inv_sigmoid(y):
    return tf.log(y / (1 - y))
  assert min_value > 0, (
      "sigmoid's output is always >0 and <1. min_value must respect "
      "these bounds for interpolation to work.")
  assert min_value < 0.5, "Must choose min_value on the left half of sigmoid."
  # Find
  #   x  s.t. sigmoid(x ) = y_min and
  #   x' s.t. sigmoid(x') = y_max
  # We will map [0, max_step] to [x_min, x_max].
  y_min = min_value
  y_max = 1.0 - min_value
  x_min = inv_sigmoid(y_min)
  x_max = inv_sigmoid(y_max)
  x = tf.minimum(step / float(max_step), 1.0)  # [0, 1]
  x = x_min + (x_max - x_min) * x  # [x_min, x_max]
  y = sigmoid(x)  # [y_min, y_max]
  # Rescale the sigmoid's output so the endpoints land exactly on
  # [min_value, 1.0] rather than [y_min, y_max].
  y = (y - y_min) / (y_max - y_min)  # [0, 1]
  y = y * (1.0 - y_min)  # [0, 1-y_min]
  y += y_min  # [y_min, 1]
  return y
def shakeshake2_py(x, y, equal=False, individual=False):
  """The shake-shake sum of 2 tensors, python version.

  Args:
    x: a Tensor.
    y: a Tensor broadcastable with x.
    equal: if True, use a fixed mixing coefficient alpha = 0.5.
    individual: if True, draw one alpha per leading-dimension entry instead
      of a single scalar.

  Returns:
    alpha * x + (1 - alpha) * y with alpha uniform in [0, 1).
  """
  if equal:
    alpha = 0.5
  elif individual:
    # Bug fix: `tf.get_shape` is not a TensorFlow function (it raised
    # AttributeError); use the dynamic shape's leading dimension instead.
    alpha = tf.random_uniform(tf.shape(x)[:1])
  else:
    alpha = tf.random_uniform([])
  return alpha * x + (1.0 - alpha) * y
@function.Defun()
def shakeshake2_grad(x1, x2, dy):
  """Overriding gradient for shake-shake of 2 tensors.

  Recomputes the forward combination with a freshly drawn alpha and
  backpropagates dy through it, so the backward pass mixes gradients with a
  different random coefficient than the forward pass used.
  """
  y = shakeshake2_py(x1, x2)
  dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])
  return dx
@function.Defun()
def shakeshake2_indiv_grad(x1, x2, dy):
  """Overriding gradient for shake-shake of 2 tensors.

  Same as shakeshake2_grad but draws one fresh alpha per batch entry
  (individual=True) when recomputing the combination for the backward pass.
  """
  y = shakeshake2_py(x1, x2, individual=True)
  dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])
  return dx
@function.Defun()
def shakeshake2_equal_grad(x1, x2, dy):
  """Overriding gradient for shake-shake of 2 tensors.

  Backward pass uses a fixed alpha of 0.5 (equal=True), i.e. gradients are
  averaged rather than randomly mixed.
  """
  y = shakeshake2_py(x1, x2, equal=True)
  dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])
  return dx
# Forward: random alpha. Backward (via grad_func): a different random alpha.
@function.Defun(grad_func=shakeshake2_grad)
def shakeshake2(x1, x2):
  """The shake-shake function with a different alpha for forward/backward."""
  return shakeshake2_py(x1, x2)
# Per-batch-entry alphas, resampled independently for forward and backward.
@function.Defun(grad_func=shakeshake2_indiv_grad)
def shakeshake2_indiv(x1, x2):
  """Shake-shake with one alpha per batch entry; resampled for backward."""
  return shakeshake2_py(x1, x2, individual=True)
# Forward: random alpha. Backward (via grad_func): fixed alpha of 0.5.
@function.Defun(grad_func=shakeshake2_equal_grad)
def shakeshake2_eqgrad(x1, x2):
  """The shake-shake function with a different alpha for forward/backward."""
  return shakeshake2_py(x1, x2)
def shakeshake(xs, equal_grad=False):
  """Multi-argument shake-shake, currently approximated by sums of 2.

  Recursively halves the list and combines the two halves pairwise.

  Args:
    xs: a non-empty list of Tensors.
    equal_grad: if True, use the variant whose backward pass averages
      gradients (alpha fixed at 0.5) instead of re-randomizing.

  Returns:
    A single Tensor combining all of xs.
  """
  if len(xs) == 1:
    return xs[0]
  mid = (len(xs) + 1) // 2
  left = shakeshake(xs[:mid], equal_grad=equal_grad)
  right = shakeshake(xs[mid:], equal_grad=equal_grad)
  combine = shakeshake2_eqgrad if equal_grad else shakeshake2
  return combine(left, right)
def convert_rgb_to_real(x):
  """Scale pixel intensities in [0, 255] to real numbers in [0, 1]."""
  with tf.name_scope("rgb_to_real", values=[x]):
    return to_float(x) / 255.0
def convert_rgb_to_symmetric_real(x):
  """Scale pixel intensities in [0, 255] to real numbers in [-1, 1]."""
  with tf.name_scope("rgb_to_real", values=[x]):
    # 0 maps to -1, 127.5 to 0, 255 to +1.
    return to_float(x) / 127.5 - 1
def convert_real_to_rgb(x):
  """Conversion of real numbers to pixel values (multiply by 255)."""
  with tf.name_scope("real_to_rgb", values=[x]):
    return x * 255.0
def expand_squeeze_to_nd(x, n, squeeze_dim=2, expand_dim=-1):
  """Make x n-dimensional by repeatedly squeezing or expanding dims.

  If x has more than n dims, `squeeze_dim` is squeezed until rank n; if it
  has fewer, `expand_dim` is expanded until rank n. At most one of the two
  loops runs.
  """
  while len(x.shape) > n:
    x = tf.squeeze(x, [squeeze_dim])
  while len(x.shape) < n:
    x = tf.expand_dims(x, expand_dim)
  return x
def standardize_images(x):
  """Image standardization on batches and videos.

  Collapses all leading dimensions into a flat batch of (H, W, C) images,
  subtracts each image's per-channel mean and divides by its per-channel
  stddev (lower-bounded by 1/sqrt(num_pixels) so constant images do not
  divide by ~0), then restores the original shape.
  """
  with tf.name_scope("standardize_images", values=[x]):
    x_shape = shape_list(x)
    # Flatten everything before the last 3 dims (H, W, C) into one batch dim.
    x = to_float(tf.reshape(x, [-1] + x_shape[-3:]))
    # Statistics over height and width, kept separate per channel.
    x_mean = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
    x_variance = tf.reduce_mean(
        tf.squared_difference(x, x_mean), axis=[1, 2], keepdims=True)
    num_pixels = to_float(x_shape[-2] * x_shape[-3])
    x = (x - x_mean) / tf.maximum(tf.sqrt(x_variance), tf.rsqrt(num_pixels))
    return tf.reshape(x, x_shape)
def flatten4d3d(x):
  """Flatten a 4d-tensor into a 3d-tensor by joining width and height."""
  batch, height, width, depth = shape_list(x)
  return tf.reshape(x, [batch, height * width, depth])
# TODO(noam): remove this function after TPUs do gather faster.
def gather(params, indices, dtype=tf.float32):
  """Version of tf.gather that works faster on tpu.

  Under XLA, implements gather as one-hot encoding followed by a matmul,
  which compiles to faster TPU code than the native gather. Outside XLA it
  falls back to tf.gather.

  Args:
    params: a 2-D Tensor [vocab_size, depth] with a statically known first
      dimension.
    indices: an integer Tensor of arbitrary shape.
    dtype: dtype used for the intermediate one-hot matrix.

  Returns:
    A Tensor of shape indices.shape + [depth].
  """
  if not is_xla_compiled():
    return tf.gather(params, indices)
  vocab_size = params.get_shape().as_list()[0]
  indices_flat = tf.reshape(indices, [-1])
  # One row of the one-hot matrix selects one row of params.
  out = tf.matmul(tf.one_hot(indices_flat, vocab_size, dtype=dtype), params)
  out = reshape_like(out, tf.expand_dims(indices, -1))
  return out
# TODO(noam): remove this function after TPUs do cumsum faster.
def cumsum(x, axis=0, exclusive=False):
  """TPU hack for tf.cumsum.
  This is equivalent to tf.cumsum and is faster on TPU as of 04/2018 unless
  the axis dimension is very large.  Implemented as a tensordot with a
  lower-triangular 0/1 matrix, which is O(length^2) work but compiles well
  under XLA.
  Args:
    x: a Tensor
    axis: an integer
    exclusive: a boolean (if True, the element itself is not included)
  Returns:
    Tensor of the same shape as x.
  """
  if not is_xla_compiled():
    return tf.cumsum(x, axis=axis, exclusive=exclusive)
  x_shape = shape_list(x)
  rank = len(x_shape)
  length = x_shape[axis]
  my_range = tf.range(length)
  # Strict inequality excludes the diagonal, giving an exclusive cumsum.
  comparator = tf.less if exclusive else tf.less_equal
  # mask[i, j] == 1 iff position i contributes to output position j.
  mask = tf.cast(
      comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)),
      x.dtype)
  ret = tf.tensordot(x, mask, axes=[[axis], [0]])
  if axis != rank - 1:
    # tensordot moved the summed axis to the end; rotate it back into place.
    ret = tf.transpose(
        ret,
        list(range(axis)) + [rank - 1] + list(range(axis, rank - 1)))
  return ret
def dropout_no_scaling(x, keep_prob):
  """Like tf.nn.dropout, but does not scale up. Works on integers also.

  Args:
    x: a Tensor
    keep_prob: a floating point number

  Returns:
    Tensor of the same shape as x, with elements zeroed out independently
    with probability 1 - keep_prob (kept elements are NOT rescaled).
  """
  if keep_prob == 1.0:
    return x
  keep_mask = tf.less(tf.random_uniform(tf.shape(x)), keep_prob)
  return x * cast_like(keep_mask, x)
def embedding(x,
              vocab_size,
              dense_size,
              name=None,
              reuse=None,
              multiplier=1.0,
              symbol_dropout_rate=0.0,
              embedding_var=None,
              dtype=tf.float32):
  """Embed x of type int64 into dense vectors, reducing to max 4 dimensions.

  Args:
    x: an integer Tensor of symbol ids.
    vocab_size: number of rows in the embedding table.
    dense_size: embedding dimensionality.
    name: variable-scope name (defaults to "embedding").
    reuse: variable-scope reuse flag.
    multiplier: scalar the embeddings are multiplied by (if != 1.0).
    symbol_dropout_rate: probability of dropping an input symbol (the id is
      zeroed without rescaling, see dropout_no_scaling).
    embedding_var: optional externally created embedding variable; if None a
      variable named "kernel" is created in this scope.
    dtype: dtype of the variable scope / one-hot gather.

  Returns:
    A float Tensor of embeddings with at most 4 dimensions.
  """
  with tf.variable_scope(
      name, default_name="embedding", values=[x], reuse=reuse, dtype=dtype):
    if embedding_var is None:
      embedding_var = tf.get_variable("kernel", [vocab_size, dense_size])
    # On the backwards pass, we want to convert the gradient from
    # an indexed-slices to a regular tensor before sending it back to the
    # parameter server. This avoids excess computation on the parameter server.
    if not tf.executing_eagerly():
      embedding_var = convert_gradient_to_tensor(embedding_var)
    x = dropout_no_scaling(x, 1.0 - symbol_dropout_rate)
    emb_x = gather(embedding_var, x, dtype)
    if multiplier != 1.0:
      emb_x *= multiplier
    static_shape = emb_x.shape.as_list()
    if len(static_shape) < 5:
      return emb_x
    assert len(static_shape) == 5
    # If we had an extra channel dimension, assume it's 1, i.e. shape[3] == 1.
    return tf.squeeze(emb_x, 3)
def shift_right(x, pad_value=None):
  """Shift the second dimension of a 4-D x right by one.

  The new leading position is zeros (or `pad_value` if given) and the last
  position is dropped.
  """
  if pad_value is None:
    padded = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])
  else:
    padded = tf.concat([pad_value, x], axis=1)
  return padded[:, :-1, :, :]
def shift_right_3d(x, pad_value=None):
  """Shift the second dimension of a 3-D x right by one.

  The new leading position is zeros (or `pad_value` if given) and the last
  position is dropped.
  """
  if pad_value is None:
    padded = tf.pad(x, [[0, 0], [1, 0], [0, 0]])
  else:
    padded = tf.concat([pad_value, x], axis=1)
  return padded[:, :-1, :]
def shift_right_2d(x, pad_value=None):
  """Shift the second dimension of a 2-D x right by one.

  The new leading position is zeros (or `pad_value` if given) and the last
  position is dropped.
  """
  if pad_value is None:
    padded = tf.pad(x, [[0, 0], [1, 0]])
  else:
    padded = tf.concat([pad_value, x], axis=1)
  return padded[:, :-1]
def conv_stride2_multistep(x, nbr_steps, output_filters, name=None, reuse=None):
  """Use a strided convolution to downsample x by 2, `nbr_steps` times.
  We use stride and filter size 2 to avoid the checkerboard problem of deconvs.
  As detailed in http://distill.pub/2016/deconv-checkerboard/.
  Args:
    x: a `Tensor` with shape `[batch, spatial, depth]` or
      `[batch, spatial_1, spatial_2, depth]`
    nbr_steps: number of halving downsample rounds to apply
    output_filters: an int specifying the filter count for the convolutions
    name: a string
    reuse: a boolean
  Returns:
    a pair: the final `Tensor` with shape
    `[batch, spatial / (2**nbr_steps), output_filters]` or
    `[batch, spatial_1 / (2**nbr_steps), spatial_2 / (2**nbr_steps),
    output_filters]`, and the list of all intermediate hidden layers
    (starting with x itself; for nbr_steps == 0 the list is just [out]).
  """
  with tf.variable_scope(
      name, default_name="conv_stride2_multistep", values=[x], reuse=reuse):
    if nbr_steps == 0:
      # Still project to output_filters with a 1x1 conv so the output depth
      # matches the general case.
      out = conv(x, output_filters, (1, 1))
      return out, [out]
    hidden_layers = [x]
    for i in range(nbr_steps):
      hidden_layers.append(
          conv(
              hidden_layers[-1],
              output_filters, (2, 2),
              strides=2,
              activation=tf.nn.relu,
              name="conv" + str(i)))
    return hidden_layers[-1], hidden_layers
def deconv_stride2_multistep(x,
                             nbr_steps,
                             output_filters,
                             name=None,
                             reuse=None):
  """Use a deconvolution to upsample x by 2**`nbr_steps`.
  Args:
    x: a `Tensor` with shape `[batch, spatial, depth]` or
      `[batch, spatial_1, spatial_2, depth]`
    nbr_steps: an int specifying the number of doubling upsample rounds to
      apply.
    output_filters: an int specifying the filter count for the deconvolutions
    name: a string
    reuse: a boolean
  Returns:
    a `Tensor` with shape `[batch, spatial * (2**nbr_steps), output_filters]` or
    `[batch, spatial_1 * (2**nbr_steps), spatial_2 * (2**nbr_steps),
    output_filters]`
  """
  with tf.variable_scope(
      name, default_name="deconv_stride2_multistep", values=[x], reuse=reuse):
    def deconv1d(cur, i):
      # Doubles the (flattened) spatial dim by producing 2x filters with a
      # 1x1 conv and reshaping the extra factor of 2 into the length.
      cur_shape = shape_list(cur)
      thicker = conv(
          cur,
          output_filters * 2, (1, 1),
          padding="SAME",
          activation=tf.nn.relu,
          name="deconv1d" + str(i))
      return tf.reshape(thicker,
                        [cur_shape[0], cur_shape[1] * 2, 1, output_filters])
    def deconv2d(cur, i):
      # Produces 4x filters then pixel-shuffles them into a 2x2 spatial
      # upsample via depth_to_space.
      thicker = conv(
          cur,
          output_filters * 4, (1, 1),
          padding="SAME",
          activation=tf.nn.relu,
          name="deconv2d" + str(i))
      return tf.depth_to_space(thicker, 2)
    cur = x
    for i in range(nbr_steps):
      if cur.get_shape()[2] == 1:
        # Statically known width of 1: treat as 1-D.
        cur = deconv1d(cur, i)
      else:
        cur_dim = shape_list(cur)[2]
        if isinstance(cur_dim, int):
          if cur_dim == 1:
            cur = deconv1d(cur, i)
          else:
            cur = deconv2d(cur, i)
        else:
          # Width only known at runtime: decide with tf.cond. The default
          # argument (idx=i) pins the loop variable at lambda-creation time.
          cur = tf.cond(
              tf.equal(cur_dim, 1),
              lambda idx=i: deconv1d(cur, idx),
              lambda idx=i: deconv2d(cur, idx))
    return cur
def conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs):
  """Conditional conv_fn making kernel 1d or 2d depending on inputs shape.

  Also implements "LEFT" (causal) padding: the input is padded on the
  top/left by kernel_size - 1 (times the dilation rate) and the convolution
  is then run with VALID padding.

  Args:
    conv_fn: convolution callable with a Keras-layer-style signature.
    inputs: a rank-4 Tensor with statically known rank.
    filters: an int, the output filter count.
    kernel_size: a pair (k_h, k_w); both must be odd for LEFT padding.
    **kwargs: forwarded to conv_fn; "padding" == "LEFT" triggers the causal
      path, "dilation_rate" and "name" are also inspected here.

  Returns:
    The convolved Tensor.

  Raises:
    ValueError: if inputs does not have statically known rank 4.
  """
  static_shape = inputs.get_shape()
  if not static_shape or len(static_shape) != 4:
    raise ValueError("Inputs to conv must have statically known rank 4. "
                     "Shape: " + str(static_shape))
  # Add support for left padding.
  if kwargs.get("padding") == "LEFT":
    dilation_rate = (1, 1)
    if "dilation_rate" in kwargs:
      dilation_rate = kwargs["dilation_rate"]
    assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1
    height_padding = 2 * (kernel_size[0] // 2) * dilation_rate[0]
    # Only pad the width if it is not 1 (the 1-D-as-2-D case); decided at
    # graph time when the static width is unknown.
    cond_padding = tf.cond(
        tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0),
        lambda: tf.constant(2 * (kernel_size[1] // 2) * dilation_rate[1]))
    width_padding = 0 if static_shape[2] == 1 else cond_padding
    padding = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]
    inputs = tf.pad(inputs, padding)
    # Set middle two dimensions to None to prevent convolution from complaining
    inputs.set_shape([static_shape[0], None, None, static_shape[3]])
    kwargs["padding"] = "VALID"
  def conv2d_kernel(kernel_size_arg, name_suffix):
    """Call conv2d but add suffix to name."""
    name = "{}_{}".format(kwargs.get("name", "conv"), name_suffix)
    # Temporarily pop keys that conv_fn must not receive; restore after.
    original_name = kwargs.pop("name", None)
    original_force2d = kwargs.pop("force2d", None)
    result = conv_fn(inputs, filters, kernel_size_arg, name=name, **kwargs)
    if original_name is not None:
      kwargs["name"] = original_name  # Restore for other calls.
    if original_force2d is not None:
      kwargs["force2d"] = original_force2d
    return result
  return conv2d_kernel(kernel_size, "single")
def conv(inputs, filters, kernel_size, dilation_rate=(1, 1), **kwargs):
  """2-D convolution wrapper dispatching through conv_internal."""
  def _conv2d(x, *args, **kw):
    return layers().Conv2D(*args, **kw)(x)
  return conv_internal(
      _conv2d, inputs, filters, kernel_size, dilation_rate=dilation_rate,
      **kwargs)
def conv1d(inputs, filters, kernel_size, dilation_rate=1, **kwargs):
  """1-D convolution implemented as a width-1 2-D convolution."""
  expanded = tf.expand_dims(inputs, 2)
  convolved = conv(
      expanded, filters, (kernel_size, 1),
      dilation_rate=(dilation_rate, 1), **kwargs)
  return tf.squeeze(convolved, 2)
def separable_conv(inputs, filters, kernel_size, **kwargs):
  """Depthwise-separable 2-D convolution via conv_internal."""
  def _sep_conv2d(x, *args, **kw):
    return layers().SeparableConv2D(*args, **kw)(x)
  return conv_internal(_sep_conv2d, inputs, filters, kernel_size, **kwargs)
def subseparable_conv(inputs, filters, kernel_size, **kwargs):
  """Sub-separable convolution. If separability == 0 it's a separable_conv.

  The channel dimension is split into |separability| blocks, each convolved
  independently (with a plain Conv2D when separability > 0, a
  SeparableConv2D when separability < 0), then the blocks are re-merged.
  """
  def conv_fn(inputs, filters, kernel_size, **kwargs):
    """Sub-separable convolution, splits into separability-many blocks."""
    separability = None
    if "separability" in kwargs:
      separability = kwargs.pop("separability")
    if separability:
      parts = []
      abs_sep = separability if separability > 0 else -1 * separability
      for split_idx, split in enumerate(tf.split(inputs, abs_sep, axis=3)):
        with tf.variable_scope("part_%d" % split_idx):
          if separability > 0:
            parts.append(
                layers().Conv2D(filters // separability, kernel_size,
                                **kwargs)(split))
          else:
            parts.append(
                layers().SeparableConv2D(filters // abs_sep,
                                         kernel_size, **kwargs)(split))
      if separability > 1:
        # Mix the independent blocks back together with a 1x1 conv.
        result = layers().Conv2D(filters, (1, 1))(tf.concat(parts, axis=3))
      elif abs_sep == 1:  # If we have just one block, return it.
        assert len(parts) == 1
        result = parts[0]
      else:
        result = tf.concat(parts, axis=3)
    else:
      # separability falsy (None or 0): ordinary separable convolution.
      result = layers().SeparableConv2D(filters, kernel_size,
                                        **kwargs)(inputs)
    if separability is not None:
      # Restore the popped kwarg so conv_internal can call us again.
      kwargs["separability"] = separability
    return result
  return conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs)
def tpu_conv1d(inputs, filters, kernel_size, padding="SAME", name="tpu_conv1d"):
  """Version of conv1d that works on TPU (as of 11/2017).

  Implemented as a sum of kernel_size dense projections of shifted copies of
  the input, which avoids the conv op entirely.

  Args:
    inputs: a Tensor with shape [batch, length, input_depth].
    filters: an integer.
    kernel_size: an integer (odd when padding is "SAME").
    padding: a string - "SAME" or "LEFT".
    name: a string.
  Returns:
    a Tensor with shape [batch, length, filters].
  """
  if kernel_size == 1:
    # Degenerate case: a 1-wide conv is just a dense layer.
    return dense(inputs, filters, name=name, use_bias=True)
  if padding == "SAME":
    assert kernel_size % 2 == 1
    first_offset = -((kernel_size - 1) // 2)
  else:
    assert padding == "LEFT"
    first_offset = -(kernel_size - 1)
  last_offset = first_offset + kernel_size - 1
  results = []
  padded = tf.pad(inputs, [[0, 0], [-first_offset, last_offset], [0, 0]])
  for i in range(kernel_size):
    # i == 0 corresponds to the unshifted input; only that tap gets a bias
    # (one bias total, as in a real convolution).
    shifted = tf.slice(padded, [0, i, 0], tf.shape(inputs)) if i else inputs
    shifted.set_shape(inputs.get_shape())
    results.append(
        dense(shifted, filters, use_bias=(i == 0), name=name + "_%d" % i))
  ret = tf.add_n(results)
  # Scale down by sqrt(kernel_size) to keep output variance comparable.
  ret *= kernel_size**-0.5
  return ret
def layer_norm_vars(filters):
  """Create Variables for layer norm.

  Args:
    filters: size of the normalized (last) dimension.

  Returns:
    A (scale, bias) pair of [filters]-shaped variables, initialized to ones
    and zeros respectively.
  """
  scale = tf.get_variable(
      "layer_norm_scale", [filters], initializer=tf.ones_initializer())
  bias = tf.get_variable(
      "layer_norm_bias", [filters], initializer=tf.zeros_initializer())
  return scale, bias
def layer_norm_compute(x, epsilon, scale, bias, layer_collection=None):
  """Layer norm raw computation.

  Args:
    x: input Tensor, normalized over its last dimension.
    epsilon: small constant for numerical stability.
    scale: multiplicative parameter, broadcast over the last dimension.
    bias: additive parameter, broadcast over the last dimension.
    layer_collection: unused here; accepted so callers that thread a KFAC
      LayerCollection through keep working.

  Returns:
    scale * (x - mean) * rsqrt(variance + epsilon) + bias.
  """
  # The old `params = (scale, bias)` local was dead code (left over from a
  # KFAC registration path) and has been removed.
  del layer_collection  # unused
  epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
  mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
  variance = tf.reduce_mean(
      tf.squared_difference(x, mean), axis=[-1], keepdims=True)
  norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
  return norm_x * scale + bias
def layer_norm(x,
               filters=None,
               epsilon=1e-6,
               name=None,
               reuse=None,
               layer_collection=None):
  """Layer normalize the tensor x, averaging over the last dimension.

  Args:
    x: input Tensor.
    filters: size of the last dimension; inferred from x when None.
    epsilon: numerical-stability constant.
    name: variable-scope name (defaults to "layer_norm").
    reuse: variable-scope reuse flag.
    layer_collection: forwarded to layer_norm_compute.

  Returns:
    The layer-normalized Tensor, same shape as x.
  """
  if filters is None:
    filters = shape_list(x)[-1]
  with tf.variable_scope(
      name, default_name="layer_norm", values=[x], reuse=reuse):
    scale, bias = layer_norm_vars(filters)
    return layer_norm_compute(x, epsilon, scale, bias,
                              layer_collection=layer_collection)
def group_norm(x, filters=None, num_groups=8, epsilon=1e-5):
  """Group normalization as in https://arxiv.org/abs/1803.08494.

  Args:
    x: a rank-4 Tensor whose channel count (last dim) is divisible by
      num_groups.
    filters: channel count; inferred from x when None.
    num_groups: number of channel groups to normalize within.
    epsilon: numerical-stability constant.

  Returns:
    The group-normalized Tensor, same shape as x.
  """
  x_shape = shape_list(x)
  if filters is None:
    filters = x_shape[-1]
  assert len(x_shape) == 4
  assert filters % num_groups == 0
  # Prepare variables.
  scale = tf.get_variable(
      "group_norm_scale", [filters], initializer=tf.ones_initializer())
  bias = tf.get_variable(
      "group_norm_bias", [filters], initializer=tf.zeros_initializer())
  epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
  # Reshape and compute group norm.
  x = tf.reshape(x, x_shape[:-1] + [num_groups, filters // num_groups])
  # Calculate mean and variance on heights, width, channels (not groups).
  mean, variance = tf.nn.moments(x, [1, 2, 4], keep_dims=True)
  norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
  return tf.reshape(norm_x, x_shape) * scale + bias
def noam_norm(x, epsilon=1.0, name=None):
  """One version of layer normalization: l2-normalize the last axis, then
  rescale by sqrt of the depth so magnitudes stay comparable."""
  with tf.name_scope(name, default_name="noam_norm", values=[x]):
    static_shape = x.get_shape()
    last_axis = len(static_shape) - 1
    depth = to_float(static_shape[-1])
    return tf.nn.l2_normalize(x, last_axis, epsilon=epsilon) * tf.sqrt(depth)
def l2_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None):
  """Layer normalization with l2 norm.

  Like layer_norm but divides by the l2 norm of (x - mean) over the last
  dimension (sum, not mean, of squared differences) rather than the stddev.
  """
  if filters is None:
    filters = shape_list(x)[-1]
  with tf.variable_scope(name, default_name="l2_norm", values=[x], reuse=reuse):
    scale = tf.get_variable(
        "l2_norm_scale", [filters], initializer=tf.ones_initializer())
    bias = tf.get_variable(
        "l2_norm_bias", [filters], initializer=tf.zeros_initializer())
    epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
    mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
    # reduce_sum (not mean): this is the squared l2 norm of the centered x.
    l2norm = tf.reduce_sum(
        tf.squared_difference(x, mean), axis=[-1], keepdims=True)
    norm_x = (x - mean) * tf.rsqrt(l2norm + epsilon)
    return norm_x * scale + bias
def apply_spectral_norm(x):
  """Normalizes x using the spectral norm.
  The implementation follows Algorithm 1 of
  https://arxiv.org/abs/1802.05957. If x is not a 2-D Tensor, then it is
  reshaped such that the number of channels (last-dimension) is the same.
  Args:
    x: Tensor with the last dimension equal to the number of filters.
  Returns:
    x: Tensor with the same shape as x normalized by the spectral norm.
    assign_op: Op to be run after every step to update the vector "u".
  """
  weights_shape = shape_list(x)
  other, num_filters = tf.reduce_prod(weights_shape[:-1]), weights_shape[-1]
  # Reshape into a 2-D matrix with outer size num_filters.
  weights_2d = tf.reshape(x, (other, num_filters))
  # One step of power iteration on the persistent vector u.
  # v = Wu / ||W u||
  with tf.variable_scope("u", reuse=tf.AUTO_REUSE):
    # u persists across calls (non-trainable), carrying the power-iteration
    # state from step to step.
    u = tf.get_variable(
        "u", [num_filters, 1],
        initializer=tf.truncated_normal_initializer(),
        trainable=False)
  v = tf.nn.l2_normalize(tf.matmul(weights_2d, u))
  # u_new = vW / ||v W||
  u_new = tf.nn.l2_normalize(tf.matmul(tf.transpose(v), weights_2d))
  # s = v*W*u
  spectral_norm = tf.squeeze(
      tf.matmul(tf.transpose(v), tf.matmul(weights_2d, tf.transpose(u_new))))
  # set u equal to u_new in the next iteration.
  assign_op = tf.assign(u, tf.transpose(u_new))
  return tf.divide(x, spectral_norm), assign_op
def apply_norm(x, norm_type, depth, epsilon, layer_collection=None):
  """Apply Normalization.

  Args:
    x: input Tensor.
    norm_type: one of "layer", "group", "batch", "noam", "l2", "none".
    depth: channel count passed to the normalizer (size of last dimension).
    epsilon: numerical-stability constant for the normalizer.
    layer_collection: KFAC LayerCollection; only supported with "layer".

  Returns:
    The normalized Tensor (x unchanged for "none").

  Raises:
    ValueError: if norm_type is not one of the supported values.
  """
  if layer_collection is not None:
    assert norm_type == "layer"
  if norm_type == "layer":
    return layer_norm(
        x, filters=depth, epsilon=epsilon, layer_collection=layer_collection)
  if norm_type == "group":
    return group_norm(x, filters=depth, epsilon=epsilon)
  if norm_type == "batch":
    return layers().BatchNormalization(epsilon=epsilon)(x)
  if norm_type == "noam":
    return noam_norm(x, epsilon)
  if norm_type == "l2":
    return l2_norm(x, filters=depth, epsilon=epsilon)
  if norm_type == "none":
    return x
  # Bug fix: the old message listed 'lr' (not a valid option) and omitted
  # the supported 'group' and 'l2' normalizers.
  raise ValueError("Parameter normalizer_fn must be one of: 'layer', 'group',"
                   " 'batch', 'noam', 'l2', 'none'.")
def zero_add(previous_value, x, name=None, reuse=None):
  """Resnet connection with zero initialization.
  Another type of resnet connection which returns previous_value + gamma * x.
  gamma is a trainable scalar and initialized with zero. It is useful when a
  module is plugged into a trained model and we want to make sure it matches the
  original model's performance.
  Args:
    previous_value:  A tensor.
    x: A tensor.
    name: name of variable scope; defaults to zero_add.
    reuse: reuse scope.
  Returns:
    previous_value + gamma * x.
  """
  with tf.variable_scope(name, default_name="zero_add", reuse=reuse):
    # Zero init: at the start of training this block is an exact identity.
    gamma = tf.get_variable("gamma", (), initializer=tf.zeros_initializer())
    return previous_value + gamma * x
def layer_prepostprocess(previous_value,
                         x,
                         sequence,
                         dropout_rate,
                         norm_type,
                         depth,
                         epsilon,
                         default_name,
                         name=None,
                         dropout_broadcast_dims=None,
                         layer_collection=None):
  """Apply a sequence of functions to the input or output of a layer.
  The sequence is specified as a string which may contain the following
  characters:
    a: add previous_value
    n: apply normalization
    d: apply dropout
    z: zero add
  For example, if sequence=="dna", then the output is
    previous_value + normalize(dropout(x))
  Args:
    previous_value: A Tensor, to be added as a residual connection ('a')
    x: A Tensor to be transformed.
    sequence: a string.
    dropout_rate: a float
    norm_type: a string (see apply_norm())
    depth: an integer (size of last dimension of x).
    epsilon: a float (parameter for normalization)
    default_name: a string
    name: a string
    dropout_broadcast_dims:  an optional list of integers less than 3
      specifying in which dimensions to broadcast the dropout decisions.
      saves memory.
    layer_collection: A tensorflow_kfac.LayerCollection. Only used by the
      KFAC optimizer. Default is None.
  Returns:
    a Tensor
  """
  with tf.variable_scope(name, default_name=default_name):
    if sequence == "none":
      return x
    # Apply the steps left-to-right; each character transforms x in place.
    for c in sequence:
      if c == "a":
        x += previous_value
      elif c == "z":
        x = zero_add(previous_value, x)
      elif c == "n":
        x = apply_norm(
            x, norm_type, depth, epsilon, layer_collection=layer_collection)
      else:
        assert c == "d", ("Unknown sequence step %s" % c)
        x = dropout_with_broadcast_dims(
            x, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
    return x
def layer_preprocess(layer_input, hparams, layer_collection=None):
  """Apply layer preprocessing.
  See layer_prepostprocess() for details.
  A hyperparameters object is passed for convenience. The hyperparameters
  that may be used are:
    layer_preprocess_sequence
    layer_prepostprocess_dropout
    norm_type
    hidden_size
    norm_epsilon
  Args:
    layer_input: a Tensor
    hparams: a hyperparameters object.
    layer_collection: A tensorflow_kfac.LayerCollection. Only used by the
      KFAC optimizer. Default is None.
  Returns:
    a Tensor
  """
  # Residual steps need a previous_value, which preprocessing (run before
  # the layer) does not have - hence previous_value=None below and these
  # checks.
  assert "a" not in hparams.layer_preprocess_sequence, (
      "No residual connections allowed in hparams.layer_preprocess_sequence")
  assert "z" not in hparams.layer_preprocess_sequence, (
      "No residual connections allowed in hparams.layer_preprocess_sequence")
  return layer_prepostprocess(
      None,
      layer_input,
      sequence=hparams.layer_preprocess_sequence,
      dropout_rate=hparams.layer_prepostprocess_dropout,
      norm_type=hparams.norm_type,
      depth=None,
      epsilon=hparams.norm_epsilon,
      dropout_broadcast_dims=comma_separated_string_to_integer_list(
          getattr(hparams, "layer_prepostprocess_dropout_broadcast_dims", "")),
      default_name="layer_prepostprocess",
      layer_collection=layer_collection)
def layer_postprocess(layer_input, layer_output, hparams):
  """Apply layer postprocessing.
  See layer_prepostprocess() for details.
  A hyperparameters object is passed for convenience. The hyperparameters
  that may be used are:
    layer_postprocess_sequence
    layer_prepostprocess_dropout
    norm_type
    hidden_size
    norm_epsilon
  Args:
    layer_input: a Tensor
    layer_output: a Tensor
    hparams: a hyperparameters object.
  Returns:
    a Tensor
  """
  # Unlike layer_preprocess, residual steps ('a'/'z') are allowed here:
  # layer_input serves as previous_value.
  return layer_prepostprocess(
      layer_input,
      layer_output,
      sequence=hparams.layer_postprocess_sequence,
      dropout_rate=hparams.layer_prepostprocess_dropout,
      norm_type=hparams.norm_type,
      depth=None,
      epsilon=hparams.norm_epsilon,
      dropout_broadcast_dims=comma_separated_string_to_integer_list(
          getattr(hparams, "layer_prepostprocess_dropout_broadcast_dims", "")),
      default_name="layer_postprocess")
def conv_block_internal(conv_fn,
                        inputs,
                        filters,
                        dilation_rates_and_kernel_sizes,
                        first_relu=True,
                        use_elu=False,
                        separabilities=None,
                        **kwargs):
  """A block of convolutions.
  Args:
    conv_fn: convolution function, e.g. conv or separable_conv.
    inputs: a Tensor
    filters: an Integer
    dilation_rates_and_kernel_sizes: a list of tuples (dilation, (k_w, k_h))
    first_relu: whether to do a relu at start (defaults to True)
    use_elu: whether to use ELUs instead of ReLUs (defaults to False)
    separabilities: list of separability factors (per-layer).
    **kwargs: additional arguments (e.g., pooling)
  Returns:
    a Tensor.
  """
  name = kwargs.pop("name") if "name" in kwargs else None
  mask = kwargs.pop("mask") if "mask" in kwargs else None
  # Usage for normalize_fn kwarg:
  # if not specified, use layer norm
  # if given normalize_fn=None, don't use any normalization
  # if given normalize_fn=norm, use the specified norm function
  use_layer_norm = "normalizer_fn" not in kwargs
  norm = kwargs.pop("normalizer_fn", None)
  use_normalizer_fn = use_layer_norm or norm
  if use_layer_norm:
    norm = lambda x, name: layer_norm(x, filters, name=name)
  with tf.variable_scope(name, "conv_block", [inputs]):
    cur, counter = inputs, -1
    for dilation_rate, kernel_size in dilation_rates_and_kernel_sizes:
      counter += 1
      # Activation precedes each conv except (optionally) the first.
      if first_relu or counter > 0:
        cur = tf.nn.elu(cur) if use_elu else tf.nn.relu(cur)
      if mask is not None:
        cur *= mask
      if separabilities:
        cur = conv_fn(
            cur,
            filters,
            kernel_size,
            dilation_rate=dilation_rate,
            name="conv_block_%d" % counter,
            # The norm supplies the shift, so a conv bias would be redundant.
            use_bias=norm is None,
            separability=separabilities[counter],
            **kwargs)
      else:
        cur = conv_fn(
            cur,
            filters,
            kernel_size,
            dilation_rate=dilation_rate,
            name="conv_block_%d" % counter,
            use_bias=norm is None,
            **kwargs)
      if use_normalizer_fn:
        cur = norm(cur, name="conv_block_norm_%d" % counter)
    return cur
def conv_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs):
  """A block of standard 2d convolutions (see conv_block_internal)."""
  return conv_block_internal(
      conv, inputs, filters, dilation_rates_and_kernel_sizes, **kwargs)
def conv1d_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs):
  """A block of standard 1d convolutions (see conv_block_internal)."""
  return conv_block_internal(
      conv1d, inputs, filters, dilation_rates_and_kernel_sizes, **kwargs)
def separable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes,
                         **kwargs):
  """A block of separable convolutions (see conv_block_internal)."""
  return conv_block_internal(
      separable_conv, inputs, filters, dilation_rates_and_kernel_sizes,
      **kwargs)
def subseparable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes,
                            **kwargs):
  """A block of sub-separable convolutions (see conv_block_internal)."""
  return conv_block_internal(
      subseparable_conv, inputs, filters, dilation_rates_and_kernel_sizes,
      **kwargs)
def pool(inputs, window_size, pooling_type, padding, strides=(1, 1)):
  """Pooling (supports "LEFT").

  Args:
    inputs: a Tensor of statically known rank 4 [batch, height, width, depth].
    window_size: a pair (window_height, window_width); both must be odd when
      padding is "LEFT".
    pooling_type: forwarded to tf.nn.pool (e.g. "MAX" or "AVG").
    padding: "SAME", "VALID" or "LEFT" (causal padding then VALID pooling).
    strides: a pair of strides.

  Returns:
    The pooled Tensor.

  Raises:
    ValueError: if inputs does not have statically known rank 4.
  """
  with tf.name_scope("pool", values=[inputs]):
    static_shape = inputs.get_shape()
    if not static_shape or len(static_shape) != 4:
      raise ValueError("Inputs to conv must have statically known rank 4.")
    # Add support for left padding.
    if padding == "LEFT":
      assert window_size[0] % 2 == 1 and window_size[1] % 2 == 1
      # Bug fix: the old `len(static_shape) == 3` branch here was dead code -
      # rank is guaranteed to be 4 by the check above - so it was removed.
      height_padding = 2 * (window_size[0] // 2)
      # Skip width padding when the (possibly dynamic) width is 1, i.e. the
      # 1-D-as-2-D case.
      cond_padding = tf.cond(
          tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0),
          lambda: tf.constant(2 * (window_size[1] // 2)))
      width_padding = 0 if static_shape[2] == 1 else cond_padding
      padding_ = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]
      inputs = tf.pad(inputs, padding_)
      inputs.set_shape([static_shape[0], None, None, static_shape[3]])
      padding = "VALID"
  return tf.nn.pool(inputs, window_size, pooling_type, padding, strides=strides)
def conv_block_downsample(x,
                          kernel,
                          strides,
                          padding,
                          separability=0,
                          name=None,
                          reuse=None):
  """Implements a downwards-striding conv block, like Xception exit flow.

  A strided 1-layer conv residual branch is added to a main branch of two
  convs plus max-pooling, followed by two more convs that grow the depth.
  """
  with tf.variable_scope(
      name, default_name="conv_block_downsample", values=[x], reuse=reuse):
    hidden_size = int(x.get_shape()[-1])
    # Residual branch: single strided conv to 1.25x depth.
    res = conv_block(
        x,
        int(1.25 * hidden_size), [((1, 1), kernel)],
        padding=padding,
        strides=strides,
        name="res_conv")
    # Main branch: two convs, then strided max-pool to match the residual.
    x = subseparable_conv_block(
        x,
        hidden_size, [((1, 1), kernel)],
        padding=padding,
        separability=separability,
        name="conv0")
    x = subseparable_conv_block(
        x,
        int(1.25 * hidden_size), [((1, 1), kernel)],
        padding=padding,
        separability=separability,
        name="conv1")
    x = pool(x, kernel, "MAX", padding, strides=strides)
    x += res
    # Post-merge convs growing the depth to 2x, then 2.5x.
    x = subseparable_conv_block(
        x,
        2 * hidden_size, [((1, 1), kernel)],
        first_relu=False,
        padding=padding,
        separability=separability,
        name="conv2")
    x = subseparable_conv_block(
        x,
        int(2.5 * hidden_size), [((1, 1), kernel)],
        padding=padding,
        separability=separability,
        name="conv3")
    return x
def get_timing_signal(length,
                      min_timescale=1,
                      max_timescale=1e4,
                      num_timescales=16):
  """Create Tensor of sinusoids of different frequencies.

  Args:
    length: Length of the Tensor to create, i.e. Number of steps.
    min_timescale: a float
    max_timescale: a float
    num_timescales: an int (>= 1)

  Returns:
    Tensor of shape (length, 2*num_timescales)
  """
  positions = to_float(tf.range(length))
  # Timescales form a geometric sequence from min_timescale to max_timescale.
  # Guard the denominator so num_timescales == 1 does not divide by zero.
  log_timescale_increment = (
      math.log(max_timescale / min_timescale) / max(num_timescales - 1, 1))
  inv_timescales = min_timescale * tf.exp(
      to_float(tf.range(num_timescales)) * -log_timescale_increment)
  # Outer product: scaled_time[pos, i] = position / timescale_i.
  scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
  return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
def add_timing_signal(x, min_timescale=1, max_timescale=1e4, num_timescales=16):
  """Adds a bunch of sinusoids of different frequencies to a Tensor.

  This allows attention to learn to use absolute and relative positions.
  The timing signal should be added to some precursor of both the source
  and the target of the attention.
  The use of relative position is possible because sin(x+y) and cos(x+y) can be
  expressed in terms of y, sin(x) and cos(x).
  In particular, we use a geometric sequence of timescales starting with
  min_timescale and ending with max_timescale. For each timescale, we
  generate the two sinusoidal signals sin(timestep/timescale) and
  cos(timestep/timescale). All of these sinusoids are concatenated in
  the depth dimension, padded with zeros to be the same depth as the input,
  and added into input.

  Args:
    x: a Tensor with shape [?, length, ?, depth]
    min_timescale: a float
    max_timescale: a float
    num_timescales: an int <= depth/2

  Returns:
    a Tensor the same shape as x.
  """
  length = shape_list(x)[1]
  depth = shape_list(x)[3]
  signal = get_timing_signal(length, min_timescale, max_timescale,
                             num_timescales)
  # Signal has depth 2*num_timescales; zero-pad the rest of the depth axis
  # so it can be broadcast-added onto x.
  padded_signal = tf.pad(signal, [[0, 0], [0, depth - 2 * num_timescales]])
  return x + tf.reshape(padded_signal, [1, length, 1, depth])
def mask_from_embedding(emb):
  """Input embeddings -> padding mask.

  We have hacked symbol_modality to return all-zero embeddings for padding.
  Returns a mask with 0.0 in the padding positions and 1.0 elsewhere.

  Args:
    emb: a Tensor with shape [batch, width, height, depth].

  Returns:
    a 0.0/1.0 Tensor with shape [batch, width, height, 1].
  """
  # A position is padding iff its embedding vector is all zeros, i.e. the
  # sum of absolute values over the depth axis is zero.
  abs_sum = tf.reduce_sum(tf.abs(emb), axis=3, keepdims=True)
  return weights_nonzero(abs_sum)
def length_from_embedding(emb):
  """Compute the length of each sequence in the batch.

  Args:
    emb: a sequence embedding Tensor with shape [batch, max_time, 1, depth].

  Returns:
    a Tensor with shape [batch].
  """
  # Count nonpadding positions per example, then cast to int.
  nonpadding = mask_from_embedding(emb)
  lengths = tf.reduce_sum(nonpadding, [1, 2, 3])
  return tf.cast(lengths, tf.int32)
def mask_pos_gt(source_length, target_length):
  """A mask with 1.0 wherever source_pos > target_pos and 0.0 elsewhere.

  Args:
    source_length: an integer
    target_length: an integer

  Returns:
    a Tensor with shape [1, target_length, source_length]
  """
  # NOTE(review): the broadcast below produces entry [0, s, t] = 1.0 iff
  # t > s, with shape [1, source_length, target_length] — the docstring's
  # shape/semantics look transposed relative to this. Confirm against callers.
  return tf.expand_dims(
      tf.cast(tf.greater(tf.expand_dims(tf.range(target_length), axis=0),
                         tf.expand_dims(tf.range(source_length), axis=1)),
              dtype=tf.float32), axis=0)
def mask_leq(target_length, source_length):
  """A mask with 1.0 wherever source_pos <= target_pos and 0.0 elsewhere.

  Args:
    target_length: an integer
    source_length: an integer

  Returns:
    a Tensor with shape [1, target_length, source_length]
  """
  # Lower-triangular band (all rows below the diagonal plus the diagonal
  # itself), i.e. the standard causal-attention mask.
  return ones_matrix_band_part(
      target_length,
      source_length,
      -1,
      0,
      out_shape=[1, target_length, source_length])
def mask_pos_lt(source_length, target_length):
  """A mask with 1.0 wherever source_pos < target_pos and 0.0 elsewhere.

  Args:
    source_length: an integer
    target_length: an integer

  Returns:
    a Tensor with shape [1, target_length, source_length]
  """
  # NOTE(review): the broadcast below produces entry [0, s, t] = 1.0 iff
  # t < s, with shape [1, source_length, target_length] — the docstring's
  # shape/semantics look transposed relative to this. Confirm against callers.
  return tf.expand_dims(
      tf.cast(tf.less(tf.expand_dims(tf.range(target_length), axis=0),
                      tf.expand_dims(tf.range(source_length), axis=1)),
              dtype=tf.float32), axis=0)
def relu_density_logit(x, reduce_dims):
  """logit(density(x)).

  Useful for histograms.

  Args:
    x: a Tensor, typically the output of tf.relu
    reduce_dims: a list of dimensions

  Returns:
    a Tensor
  """
  # Fraction of strictly-positive activations over the reduced dimensions.
  frac = tf.reduce_mean(to_float(x > 0.0), reduce_dims)
  # logit with an e^-10 floor on both terms to avoid log(0).
  eps = math.exp(-10)
  return tf.log(frac + eps) - tf.log((1.0 - frac) + eps)
def maybe_zero_out_padding(inputs, kernel_size, nonpadding_mask):
  """If necessary, zero out inputs to a conv for padding positions.

  Args:
    inputs: a Tensor with shape [batch, length, ...]
    kernel_size: an integer or pair of integers
    nonpadding_mask: a Tensor with shape [batch, length]

  Returns:
    Tensor of the same shape as inputs.
  """
  # A 1x1 conv can't smear padding into neighbors, and with no mask there is
  # nothing to zero out — in both cases pass the input through untouched.
  if kernel_size == 1 or kernel_size == (1, 1) or nonpadding_mask is None:
    return inputs
  mask = nonpadding_mask
  # Append singleton axes until the mask broadcasts against inputs.
  while mask.get_shape().ndims < inputs.get_shape().ndims:
    mask = tf.expand_dims(mask, -1)
  return inputs * mask
def dense_relu_dense(inputs,
                     filter_size,
                     output_size,
                     output_activation=None,
                     dropout=0.0,
                     dropout_broadcast_dims=None,
                     layer_collection=None,
                     name=None):
  """Hidden layer with RELU activation followed by linear projection.

  Args:
    inputs: a Tensor.
    filter_size: an integer, width of the hidden layer.
    output_size: an integer, width of the output projection.
    output_activation: optional activation for the output layer (linear if
      None).
    dropout: a float, dropout rate applied after the RELU layer.
    dropout_broadcast_dims: optional list of dims to broadcast dropout over.
    layer_collection: optional K-FAC layer collection, passed to dense.
    name: an optional string prefix for the layer names.

  Returns:
    a Tensor of the last dimension output_size.
  """
  # layer_name is appended with "conv1" or "conv2" in this method only for
  # historical reasons. These are in fact dense layers.
  layer_name = "%s_{}" % name if name else "{}"
  h = dense(
      inputs,
      filter_size,
      use_bias=True,
      activation=tf.nn.relu,
      layer_collection=layer_collection,
      name=layer_name.format("conv1"))

  if dropout != 0.0:
    h = dropout_with_broadcast_dims(
        h, 1.0 - dropout, broadcast_dims=dropout_broadcast_dims)
  o = dense(
      h,
      output_size,
      activation=output_activation,
      use_bias=True,
      layer_collection=layer_collection,
      name=layer_name.format("conv2"))
  return o
def dense_dropconnect(inputs,
                      output_size,
                      dropconnect_dropout=0.0,
                      name="dense_dropconnect",
                      **kwargs):
  """Dense layer with dropconnect.

  Dropconnect drops individual weights (rather than activations) by
  installing tf.nn.dropout as the kernel regularizer on the dense layer.

  Args:
    inputs: a Tensor.
    output_size: an integer, width of the output.
    dropconnect_dropout: a float, fraction of kernel weights to drop.
    name: an optional string, layer name.
    **kwargs: forwarded to dense.

  Returns:
    a Tensor of the last dimension output_size.
  """
  if dropconnect_dropout != 0.0:
    tf.logging.info("Applying dropconnect as the kernel regularization.")
    kwargs["kernel_regularizer"] = functools.partial(
        tf.nn.dropout, keep_prob=1.0 - dropconnect_dropout)

  return dense(inputs, output_size, use_bias=True, name=name, **kwargs)
def conv_relu_conv(inputs,
                   filter_size,
                   output_size,
                   first_kernel_size=3,
                   second_kernel_size=3,
                   padding="SAME",
                   nonpadding_mask=None,
                   dropout=0.0,
                   name=None,
                   cache=None,
                   decode_loop_step=None):
  """Hidden layer with RELU activation followed by linear projection.

  Args:
    inputs: A tensor.
    filter_size: An integer.
    output_size: An integer.
    first_kernel_size: An integer.
    second_kernel_size: An integer.
    padding: A string.
    nonpadding_mask: A tensor.
    dropout: A float.
    name: A string.
    cache: A dict, containing Tensors which are the results of previous
        attentions, used for fast decoding.
    decode_loop_step: An integer, step number of the decoding loop.
        Only used for inference on TPU. If it is not None, the function
        will do inplace update for the cache instead of concatenating the
        current result to the cache.

  Returns:
    A Tensor.
  """
  with tf.variable_scope(name, "conv_relu_conv", [inputs]):
    inputs = maybe_zero_out_padding(inputs, first_kernel_size, nonpadding_mask)

    if cache:
      if decode_loop_step is None:
        inputs = cache["f"] = tf.concat([cache["f"], inputs], axis=1)
      else:
        # Inplace update is required for inference on TPU.
        # Inplace_ops only supports inplace_update on the first dimension.
        # The performance of current implementation is better than updating
        # the tensor by adding the result of matmul(one_hot,
        # update_in_current_step)
        tmp_f = tf.transpose(cache["f"], perm=[1, 0, 2])
        tmp_f = inplace_ops.alias_inplace_update(
            tmp_f,
            decode_loop_step * tf.shape(inputs)[1],
            tf.transpose(inputs, perm=[1, 0, 2]))
        inputs = cache["f"] = tf.transpose(tmp_f, perm=[1, 0, 2])
      # Only the last first_kernel_size positions can influence the current
      # step's convolution, so keep just that window in the cache.
      inputs = cache["f"] = inputs[:, -first_kernel_size:, :]

    h = tpu_conv1d(
        inputs, filter_size, first_kernel_size, padding=padding, name="conv1")

    if cache:
      # During incremental decoding only the newest position is needed.
      h = h[:, -1:, :]

    h = tf.nn.relu(h)
    if dropout != 0.0:
      h = tf.nn.dropout(h, 1.0 - dropout)
    h = maybe_zero_out_padding(h, second_kernel_size, nonpadding_mask)
    return tpu_conv1d(
        h, output_size, second_kernel_size, padding=padding, name="conv2")
def sepconv_relu_sepconv(inputs,
                         filter_size,
                         output_size,
                         first_kernel_size=(1, 1),
                         second_kernel_size=(1, 1),
                         padding="LEFT",
                         nonpadding_mask=None,
                         dropout=0.0,
                         name=None):
  """Hidden layer with RELU activation followed by linear projection.

  Uses depthwise-separable convolutions for both layers. 3-D inputs are
  temporarily expanded to 4-D (separable_conv expects 4-D) and squeezed back.

  Args:
    inputs: a 3-D or 4-D Tensor.
    filter_size: an integer, hidden layer width.
    output_size: an integer, output width.
    first_kernel_size: kernel of the first separable conv.
    second_kernel_size: kernel of the second separable conv.
    padding: a string, e.g. "LEFT" for causal padding.
    nonpadding_mask: optional Tensor masking out padding positions.
    dropout: a float, dropout rate after the RELU layer.
    name: an optional string, variable scope name.

  Returns:
    a Tensor of the same rank as inputs with last dimension output_size.
  """
  with tf.variable_scope(name, "sepconv_relu_sepconv", [inputs]):
    inputs = maybe_zero_out_padding(inputs, first_kernel_size, nonpadding_mask)
    if inputs.get_shape().ndims == 3:
      is_3d = True
      inputs = tf.expand_dims(inputs, 2)
    else:
      is_3d = False
    h = separable_conv(
        inputs,
        filter_size,
        first_kernel_size,
        activation=tf.nn.relu,
        padding=padding,
        name="conv1")
    if dropout != 0.0:
      h = tf.nn.dropout(h, 1.0 - dropout)
    h = maybe_zero_out_padding(h, second_kernel_size, nonpadding_mask)
    ret = separable_conv(
        h, output_size, second_kernel_size, padding=padding, name="conv2")
    if is_3d:
      ret = tf.squeeze(ret, 2)
    return ret
# DEPRECATED - use dense_relu_dense, conv_relu_conv, sepconv_relu_sepconv
def conv_hidden_relu(inputs,
                     hidden_size,
                     output_size,
                     kernel_size=(1, 1),
                     second_kernel_size=(1, 1),
                     dropout=0.0,
                     **kwargs):
  """Hidden layer with RELU activation followed by linear projection.

  Deprecated; see the note above. Chooses plain conv for (1, 1) kernels and
  separable conv otherwise. 3-D inputs are expanded to 4-D and squeezed back.

  Args:
    inputs: a 3-D or 4-D Tensor.
    hidden_size: an integer, hidden layer width.
    output_size: an integer, output width.
    kernel_size: kernel of the first conv.
    second_kernel_size: kernel of the second conv.
    dropout: a float, dropout rate after the RELU layer.
    **kwargs: forwarded to the conv calls; may include "name".

  Returns:
    a Tensor of the same rank as inputs with last dimension output_size.
  """
  name = kwargs.pop("name") if "name" in kwargs else None
  with tf.variable_scope(name, "conv_hidden_relu", [inputs]):
    if inputs.get_shape().ndims == 3:
      is_3d = True
      inputs = tf.expand_dims(inputs, 2)
    else:
      is_3d = False
    conv_f1 = conv if kernel_size == (1, 1) else separable_conv
    h = conv_f1(
        inputs,
        hidden_size,
        kernel_size,
        activation=tf.nn.relu,
        name="conv1",
        **kwargs)
    if dropout != 0.0:
      h = tf.nn.dropout(h, 1.0 - dropout)
    conv_f2 = conv if second_kernel_size == (1, 1) else separable_conv
    ret = conv_f2(h, output_size, second_kernel_size, name="conv2", **kwargs)
    if is_3d:
      ret = tf.squeeze(ret, 2)
    return ret
def conv_gru(x,
             kernel_size,
             filters,
             padding="SAME",
             dilation_rate=(1, 1),
             name=None,
             reuse=None):
  """Convolutional GRU in 1 dimension.

  Args:
    x: input Tensor; last dimension must equal `filters` (the gates are
      combined elementwise with x).
    kernel_size: convolution kernel size.
    filters: an integer, number of output filters for each gate conv.
    padding: a string, conv padding mode.
    dilation_rate: conv dilation rate.
    name: an optional string, variable scope name.
    reuse: optional variable reuse flag.

  Returns:
    a Tensor of the same shape as x.
  """

  # Let's make a shorthand for conv call first.
  def do_conv(args, name, bias_start, padding):
    return conv(
        args,
        filters,
        kernel_size,
        padding=padding,
        dilation_rate=dilation_rate,
        bias_initializer=tf.constant_initializer(bias_start),
        name=name)

  # Here comes the GRU gate.
  with tf.variable_scope(
      name, default_name="conv_gru", values=[x], reuse=reuse):
    # Gate biases start at 1.0 so the gates are initially mostly open
    # (i.e. the layer starts close to the identity).
    reset = saturating_sigmoid(do_conv(x, "reset", 1.0, padding))
    gate = saturating_sigmoid(do_conv(x, "gate", 1.0, padding))
    candidate = tf.tanh(do_conv(reset * x, "candidate", 0.0, padding))
    return gate * x + (1 - gate) * candidate
def gru_feedfwd(a_t, h_prev, filters, name=None):
  """position-wise Feed-fwd GRU gates following the MPNN.

  Args:
    a_t: Tensor of shape [batch, length, depth] of current input
    h_prev: Tensor of shape [batch, length, depth] of prev input
    filters: an integer specifying number of dimensions of the filters
    name: A string

  Returns:
    h_t: [batch, length, filters] hidden state
  """

  with tf.variable_scope(name, default_name="GRU", values=[a_t, h_prev]):
    # we use right matrix multiplication to handle batches
    # W_z and W_r have shape 2d, d. U_z U_r have shape d,d
    # Standard GRU: z_t is the update gate, r_t the reset gate.
    z_t = (
        tf.sigmoid(
            tpu_conv1d(a_t, filters, 1, padding="SAME", name="W_z") +
            tpu_conv1d(h_prev, filters, 1, padding="SAME", name="U_z")))
    r_t = (
        tf.sigmoid(
            tpu_conv1d(a_t, filters, 1, padding="SAME", name="W_r") +
            tpu_conv1d(h_prev, filters, 1, padding="SAME", name="U_r")))
    # Candidate state uses the reset-gated previous state.
    h_tilde = (
        tf.tanh(
            tpu_conv1d(a_t, filters, 1, padding="SAME", name="W") +
            tpu_conv1d(r_t * h_prev, filters, 1, padding="SAME", name="U")))
    h_t = (1. - z_t) * h_prev + z_t * h_tilde

  return h_t
def conv_lstm(x,
              kernel_size,
              filters,
              padding="SAME",
              dilation_rate=(1, 1),
              name=None,
              reuse=None):
  """Convolutional LSTM in 1 dimension.

  Args:
    x: input Tensor; doubles as the previous cell state (last dimension
      must equal `filters`).
    kernel_size: convolution kernel size.
    filters: an integer, number of filters per gate.
    padding: a string, conv padding mode.
    dilation_rate: conv dilation rate.
    name: an optional string, variable scope name.
    reuse: optional variable reuse flag.

  Returns:
    a Tensor of the same shape as x.
  """
  with tf.variable_scope(
      name, default_name="conv_lstm", values=[x], reuse=reuse):
    # One conv produces all four gates; layer-norm then split them.
    # Gate order after the split: g[0]=forget, g[1]=input, g[2]=output,
    # g[3]=candidate.
    gates = conv(
        x,
        4 * filters,
        kernel_size,
        padding=padding,
        dilation_rate=dilation_rate)
    g = tf.split(layer_norm(gates, 4 * filters), 4, axis=3)
    new_cell = tf.sigmoid(g[0]) * x + tf.sigmoid(g[1]) * tf.tanh(g[3])
    return tf.sigmoid(g[2]) * tf.tanh(new_cell)
def diagonal_conv_gru(x,
                      kernel_size,
                      filters,
                      dropout=0.0,
                      name=None,
                      reuse=None):
  """Diagonal Convolutional GRU as in https://arxiv.org/abs/1702.08727.

  Args:
    x: a 4-D input Tensor; last dimension must equal `filters`.
    kernel_size: convolution kernel size.
    filters: an integer, number of filters per gate conv.
    dropout: a float, dropout rate on the candidate state.
    name: an optional string, variable scope name.
    reuse: optional variable reuse flag.

  Returns:
    A pair (output Tensor of the same shape as x, scalar saturation cost).
  """

  # Let's make a shorthand for conv call first.
  def do_conv(args, name, bias_start):
    return conv(
        args,
        filters,
        kernel_size,
        padding="SAME",
        bias_initializer=tf.constant_initializer(bias_start),
        name=name)

  # Here comes the GRU gate.
  with tf.variable_scope(
      name, default_name="diagonal_conv_gru", values=[x], reuse=reuse):
    # hard_sigmoid also returns a saturation cost used as a regularizer.
    reset, reset_cost = hard_sigmoid(do_conv(x, "reset", 0.5))
    gate, gate_cost = hard_sigmoid(do_conv(x, "gate", 0.7))
    candidate = tf.tanh(do_conv(reset * x, "candidate", 0.0))

    if dropout > 0.0:
      candidate = tf.nn.dropout(candidate, 1.0 - dropout)

    # Diagonal shift: one third of the channels shifts left, one third
    # shifts right, the rest stays put (a fixed depthwise conv).
    shift_filters = filters // 3
    base_filter = ([[0, 1, 0]] * (filters - 2 * shift_filters) +
                   [[1, 0, 0]] * shift_filters + [[0, 0, 1]] * shift_filters)
    shift_filter = tf.constant(np.transpose(base_filter), dtype=tf.float32)
    shift_filter = tf.expand_dims(tf.expand_dims(shift_filter, 0), 3)
    x_shifted = tf.nn.depthwise_conv2d(
        x, shift_filter, [1, 1, 1, 1], padding="SAME")

    # Return the gated result and cost.
    total_cost_avg = 0.5 * (reset_cost + gate_cost)
    return gate * x_shifted + (1 - gate) * candidate, total_cost_avg
def pad_to_same_length(x, y, final_length_divisible_by=1, axis=1):
  """Pad tensors x and y on axis 1 so that they have the same length.

  Args:
    x: a Tensor.
    y: a Tensor.
    final_length_divisible_by: an integer; if > 1, the common length is
      additionally rounded up to a multiple of this value.
    axis: which axis to pad; only 1 and 2 are supported.

  Returns:
    A pair (x, y) padded with zeros on `axis` to the same (rounded) length.

  Raises:
    ValueError: if axis is not 1 or 2.
  """
  if axis not in [1, 2]:
    raise ValueError("Only axis=1 and axis=2 supported for now.")
  with tf.name_scope("pad_to_same_length", values=[x, y]):
    x_length = shape_list(x)[axis]
    y_length = shape_list(y)[axis]
    # Fast path: both lengths statically known and already equal.
    if (isinstance(x_length, int) and isinstance(y_length, int) and
        x_length == y_length and final_length_divisible_by == 1):
      return x, y
    max_length = tf.maximum(x_length, y_length)
    if final_length_divisible_by > 1:
      # Find the nearest larger-or-equal integer divisible by given number.
      max_length += final_length_divisible_by - 1
      max_length //= final_length_divisible_by
      max_length *= final_length_divisible_by
    length_diff1 = max_length - x_length
    length_diff2 = max_length - y_length

    def padding_list(length_diff, arg):
      # Pad only on `axis`; zero padding for all trailing dims (dynamic rank).
      if axis == 1:
        return [[[0, 0], [0, length_diff]],
                tf.zeros([tf.rank(arg) - 2, 2], dtype=tf.int32)]
      return [[[0, 0], [0, 0], [0, length_diff]],
              tf.zeros([tf.rank(arg) - 3, 2], dtype=tf.int32)]

    paddings1 = tf.concat(padding_list(length_diff1, x), axis=0)
    paddings2 = tf.concat(padding_list(length_diff2, y), axis=0)
    res_x = tf.pad(x, paddings1)
    res_y = tf.pad(y, paddings2)
    # Static shapes are the same except for axis=1.
    x_shape = x.shape.as_list()
    x_shape[axis] = None
    res_x.set_shape(x_shape)
    y_shape = y.shape.as_list()
    y_shape[axis] = None
    res_y.set_shape(y_shape)
    return res_x, res_y
def pad_with_zeros(logits, labels):
  """Pad labels on the length dimension to match logits length."""
  with tf.name_scope("pad_with_zeros", values=[logits, labels]):
    padded_logits, padded_labels = pad_to_same_length(logits, labels)
    if len(padded_labels.shape) == 3:
      # 2-d labels carry a second spatial axis; align axis=2 as well.
      padded_logits, padded_labels = pad_to_same_length(
          padded_logits, padded_labels, axis=2)
    return padded_logits, padded_labels
def weights_nonzero(labels):
  """Assign weight 1.0 to all labels except for padding (id=0)."""
  nonpadding = tf.not_equal(labels, 0)
  return to_float(nonpadding)
def weights_prepend_inputs_to_targets(labels):
  """Assign weight 1.0 to only the "targets" portion of the labels.

  Weight 1.0 is assigned to all nonzero labels past the first zero.
  See prepend_mode in common_hparams.py

  Args:
    labels: A Tensor of int32s.

  Returns:
    A Tensor of floats.
  """
  # Count along time: positive from the first 0 (the inputs/targets separator)
  # onwards.
  past_first_zero = tf.cumsum(to_float(tf.equal(labels, 0)), axis=1)
  nonzero = to_float(labels)
  # Weight a position iff it is past the separator AND not padding itself.
  return to_float(tf.not_equal(past_first_zero * nonzero, 0))
def check_nonnegative(value):
  """Check that the value is nonnegative.

  Works both on plain Python numbers (checked eagerly) and on tf.Tensors
  (checked with a runtime assertion attached via a control dependency).
  """
  if not isinstance(value, tf.Tensor):
    if value < 0:
      raise ValueError("Value must be non-negative.")
    return value
  with tf.control_dependencies([tf.assert_greater_equal(value, 0)]):
    return tf.identity(value)
def weights_multi_problem(labels, taskid=-1):
  """Assign weight 1.0 to only the "targets" portion of the labels.

  Weight 1.0 is assigned to all labels past the taskid.

  Args:
    labels: A Tensor of int32s.
    taskid: an int32 representing the task id for a problem.

  Returns:
    A Tensor of floats.

  Raises:
    ValueError: The Task ID must be valid.
  """
  taskid = check_nonnegative(taskid)
  # Positive for positions strictly after the task-id token.
  past_taskid = tf.cumsum(to_float(tf.equal(labels, taskid)), axis=1)
  # Additionally zero out the task id location
  past_taskid *= to_float(tf.not_equal(labels, taskid))
  non_taskid = to_float(labels)
  # Weight a position iff it is past the task id AND not padding (id=0).
  return to_float(tf.not_equal(past_taskid * non_taskid, 0))
def weights_multi_problem_all(labels, taskid=-1):
  """Assign weight 1.0 to only examples from the given task."""
  taskid = check_nonnegative(taskid)
  weights = to_float(tf.not_equal(labels, 0))
  # Same per-position target mask as weights_multi_problem.
  past_taskid = tf.cumsum(to_float(tf.equal(labels, taskid)), axis=1)
  # Additionally zero out the task id location
  past_taskid *= to_float(tf.not_equal(labels, taskid))
  non_taskid = to_float(labels)
  example_mask = to_float(tf.not_equal(past_taskid * non_taskid, 0))
  # Reduce to a per-example indicator: 1.0 iff the example contains any
  # target token for this task, then broadcast it over all its tokens.
  example_mask = tf.reduce_sum(example_mask, axis=1)
  example_mask = to_float(
      tf.greater(example_mask, tf.zeros_like(example_mask)))
  return weights * tf.expand_dims(example_mask, axis=-1)
def weights_multi_problem_input(labels, taskid=-1):
  """Assign weight 1.0 to only the inputs for the given task."""
  taskid = check_nonnegative(taskid)
  all_token_weights = weights_multi_problem_all(labels, taskid)
  target_weights = weights_multi_problem(labels, taskid)
  # Inputs are every weighted token of the task's examples minus the targets.
  return all_token_weights - target_weights
def weights_all(labels):
  """Assign weight 1.0 to all labels."""
  # Uniform weighting: every position counts, padding included.
  uniform = tf.ones_like(labels, dtype=tf.float32)
  return uniform
def weights_concatenated(labels):
  """Assign weight 1.0 to the "target" part of the concatenated labels.

  The labels look like:
    source English I love you . ID1 target French Je t'aime . ID1 source
      English the cat ID1 target French le chat ID1 source English ...

  We want to assign weight 1.0 to all words in the target text (including the
  ID1 end symbol), but not to the source text or the boilerplate. In the
  above example, the target words that get positive weight are:
    Je t'aime . ID1 le chat ID1

  Args:
    labels: a Tensor
  Returns:
    a Tensor
  """
  # Segment index: increments at each ID1 (eos) token; even segments are
  # source text, odd segments are target text.
  eos_mask = tf.to_int32(tf.equal(labels, 1))
  sentence_num = tf.cumsum(eos_mask, axis=1, exclusive=True)
  in_target = tf.equal(tf.mod(sentence_num, 2), 1)
  # first two tokens of each sentence are boilerplate.
  sentence_num_plus_one = sentence_num + 1
  # NOTE(review): the pad spec below assumes labels is rank 4
  # ([batch, length, 1, 1]) — confirm against callers.
  shifted = tf.pad(sentence_num_plus_one,
                   [[0, 0], [2, 0], [0, 0], [0, 0]])[:, :-2, :, :]
  # A token is boilerplate iff its segment started within the last 2 steps.
  nonboilerplate = tf.equal(sentence_num_plus_one, shifted)
  ret = to_float(tf.logical_and(nonboilerplate, in_target))
  return ret
def padded_cross_entropy(logits,
                         labels,
                         label_smoothing,
                         weights_fn=weights_nonzero,
                         reduce_sum=True,
                         cutoff=0.0,
                         gaussian=False):
  """Compute cross-entropy assuming 0s are padding.

  Computes a loss numerator (the sum of losses), and loss denominator
  (the number of non-padding tokens).

  Args:
    logits: a `Tensor` with shape `[batch, timesteps, vocab_size]`.
      optionally a FactoredTensor.
    labels: an integer `Tensor` with shape `[batch, timesteps]`.
    label_smoothing: a floating point `Scalar`.
    weights_fn: A function from labels to weights.
    reduce_sum: a Boolean, whether to sum at the end or not.
    cutoff: a float, at which point to have no loss.
    gaussian: If true, use a Gaussian distribution for label smoothing

  Returns:
    loss_numerator: a `Scalar`.  Sum of losses.
    loss_denominator: a `Scalar.  The number of non-padding target tokens.

  Raises:
    ValueError: in case of unsupported argument types.
  """
  if isinstance(logits, FactoredTensor):
    # Factored logits get a dedicated memory-efficient implementation.
    if gaussian:
      raise ValueError("Factored padded cross entropy with Gaussian smoothing "
                       "is not implemented yet.")
    return padded_cross_entropy_factored(
        logits,
        labels,
        label_smoothing,
        weights_fn=weights_fn,
        reduce_sum=reduce_sum)
  confidence = 1.0 - label_smoothing
  logits_shape = shape_list(logits)
  vocab_size = logits_shape[-1]
  with tf.name_scope("padded_cross_entropy", values=[logits, labels]):
    if len(logits_shape) == 2:
      # Deal with the case where we did not insert extra dimensions due to
      # TPU issues.  No pad-to-same-length happens in this case.
      # TODO(noam): remove this logic once TPU can handle extra dimensions.
      labels = tf.reshape(labels, [-1])
    else:
      logits, labels = pad_with_zeros(logits, labels)
    logits = tf.reshape(
        logits,
        shape_list(labels) + [vocab_size],
        name="padded_cross_entropy_size_check")
    logits = tf.cast(logits, tf.float32)
    xent = smoothing_cross_entropy(
        logits, labels, vocab_size, confidence, gaussian=gaussian)
    weights = weights_fn(labels)
    if cutoff > 0.0:
      # Hinge at `cutoff`: losses below the cutoff contribute nothing.
      xent = tf.nn.relu(xent - cutoff)
    if not reduce_sum:
      return xent * weights, weights
    return tf.reduce_sum(xent * weights), tf.reduce_sum(weights)
def _weights_one_third(labels):
  """Returns Tensor of shape [batch, height, width]. Each element is 1/3."""
  # Drop the channel axis, then weight each spatial position by 1/3 so the
  # three channels average rather than sum.
  spatial_shape = tf.shape(labels)[:-1]
  return tf.ones(spatial_shape) / 3.
def dml_loss(pred, labels, weights_fn=_weights_one_third, reduce_sum=True):
  """Discretized mixture of logistics loss.

  Args:
    pred: A [batch, height, width, num_mixtures*10] tensor of floats
      comprising one unconstrained mixture probability, three means
      (one per channel), three standard deviations (one per channel),
      and three coefficients which linearly parameterize dependence across
      channels.
    labels: A [batch, height, width, channels] tensor of 8-bit pixel
      intensities. The computation assumes channels is 3.
    weights_fn: A function of labels, returning a Tensor of shape
      [batch, height, width] which weights each loss term. Default is to scale
      each loss term by 1/3 so that they capture the average across channels.
    reduce_sum: A boolean, to return scalar loss instead of per position.

  Returns:
    Tuple of loss tensors for numerator and denominator, each a scalar if
      reduce_sum else of shape [batch, height, width]. The sum of their
      divisions is the number of nats for each pixel in labels.
  """
  # Loss is defined on pixel values rescaled to [-1, 1].
  real_labels = convert_rgb_to_symmetric_real(labels)
  dml_loss_value = discretized_mix_logistic_loss(pred=pred, labels=real_labels)
  weights = weights_fn(labels)
  loss_num = weights * dml_loss_value
  # Denominator counts the positions with nonzero weight.
  loss_den = weights_nonzero(weights)
  if reduce_sum:
    loss_num = tf.reduce_sum(loss_num)
    loss_den = tf.reduce_sum(loss_den)
  return loss_num, loss_den
def split_to_discretized_mix_logistic_params(inputs):
  """Splits input tensor into parameters of discretized mixture logistic.

  Args:
    inputs: A [batch, height, width, num_mixtures*10] tensor of floats
      comprising one unconstrained mixture probability, three means
      (one per channel), three standard deviations (one per channel),
      and three coefficients which linearly parameterize dependence across
      channels.

  Returns:
    Tuple of unconstrained mixture probabilities, locations, scales, and
    coefficient parameters of the distribution. The mixture probability has
    shape [batch, height, width, num_mixtures]. Other parameters have shape
    [batch, height, width, num_mixtures, 3].
  """
  batch, height, width, output_dim = shape_list(inputs)  # pylint: disable=unbalanced-tuple-unpacking
  # 10 parameters per mixture component: 1 logit + 3 means + 3 log-scales
  # + 3 cross-channel coefficients.
  num_mixtures = output_dim // 10
  logits, locs, log_scales, coeffs = tf.split(
      inputs,
      num_or_size_splits=[
          num_mixtures, num_mixtures * 3, num_mixtures * 3, num_mixtures * 3
      ],
      axis=-1)
  split_shape = [batch, height, width, num_mixtures, 3]
  locs = tf.reshape(locs, split_shape)
  log_scales = tf.reshape(log_scales, split_shape)
  # Floor log-scales for numerical stability (avoids near-zero scales).
  log_scales = tf.maximum(log_scales, -7.)
  coeffs = tf.reshape(coeffs, split_shape)
  # Coefficients are squashed to (-1, 1).
  coeffs = tf.tanh(coeffs)
  return logits, locs, log_scales, coeffs
def discretized_mix_logistic_loss(pred, labels):
  """Computes negative log probability for the discretized mixture of logistics.

  The distribution of a whole pixel is a mixture of 3-dimensional discretized
  logistic distributions. The 3-D discretized logistic factorizes as 3 1-D
  discretized logistic distributions, one for each channel. It defines

  ```none
  P(X = x)
  = sum_{k=1}^K probs[k] * P(X = x | locs[k], scales[k])
  = sum_{k=1}^K probs[k] * [
      prod_{c=1}^3 DiscretizedLogistic(X[c] = x[c] | means[k][c], scales[k]) ]
  ```

  The means tensor is a linear combination of location parameters and previous
  channels. The discretized logistic distribution assigns probability mass to an
  event P(X=x) via logistic CDFs: P(X <= x + 0.5) - P(X < x - 0.5) for 1 < x <
  254; P(X <= 0.5) for x = 0; and 1 - P(X < 254.5) for x = 255. Instead of
  8-bit inputs, this implementation assumes the events are rescaled to [-1, 1].

  Args:
    pred: A [batch, height, width, num_mixtures*10] tensor of floats
      comprising one unconstrained mixture probability, three means
      (one per channel), three standard deviations (one per channel),
      and three coefficients which linearly parameterize dependence across
      channels.
    labels: A [batch, height, width, channels] tensor of true pixel intensities
      rescaled to [-1, 1]. The computation assumes channels is 3.

  Returns:
    A [batch, height, width] tensor of the negative log conditional probability
    of each pixel given all previous pixels.
  """

  logits, locs, log_scales, coeffs = split_to_discretized_mix_logistic_params(
      pred)

  # Tile labels to broadcast compute across the mixture dimension.
  batch, height, width, num_mixtures = shape_list(logits)  # pylint: disable=unbalanced-tuple-unpacking
  labels = tf.tile(
      tf.reshape(labels, [batch, height, width, 1, 3]),
      [1, 1, 1, num_mixtures, 1])

  # p(x) = sigmoid((x - means_i + 1/255.)/scale_i) -
  #        sigmoid((x - means_i - 1/255.)/scale_i)
  # for each channel i. The means are linearly parameterized.
  means_0 = locs[..., 0]
  means_1 = locs[..., 1] + coeffs[..., 0] * labels[..., 0]
  means_2 = (
      locs[..., 2] + coeffs[..., 1] * labels[..., 0] +
      coeffs[..., 2] * labels[..., 1])
  means = tf.stack([means_0, means_1, means_2], axis=-1)
  centered_labels = labels - means
  inv_stdv = tf.exp(-log_scales)
  # 1/255 is half a bin width after rescaling [0, 255] to [-1, 1].
  plus_in = inv_stdv * (centered_labels + 1. / 255.)
  min_in = inv_stdv * (centered_labels - 1. / 255.)
  cdf_plus = tf.nn.sigmoid(plus_in)
  cdf_min = tf.nn.sigmoid(min_in)

  # Compute log probability for edge case of 0 (before scaling), 255 (before
  # scaling), and all other cases respectively.
  log_prob_0 = plus_in - tf.nn.softplus(plus_in)
  log_prob_255 = -tf.nn.softplus(min_in)
  prob_event = tf.maximum(cdf_plus - cdf_min, 1e-12)
  log_prob_event = tf.log(prob_event)

  # Robustly select log-prob based on numerical edge-cases: (a) [-1, -1+eps);
  # (b) (1-eps, 1]; (c) NaNs during `tf.gradients` of `tf.select`, which may
  # cause `tf.log(0.)`; (d) p(x) < 1e-5.
  mid_in = inv_stdv * centered_labels
  log_prob_event_approx = (
      mid_in - log_scales - 2. * tf.nn.softplus(mid_in) - np.log(127.5))
  log_probs = tf.where(
      labels < -0.999, log_prob_0,
      tf.where(
          labels > 0.999, log_prob_255,
          tf.where(prob_event > 1e-5, log_prob_event, log_prob_event_approx)))

  # Sum over channels and compute log-probability of each mixture.
  log_probs = tf.reduce_sum(log_probs, -1) + tf.nn.log_softmax(logits, axis=-1)
  output = -tf.reduce_logsumexp(log_probs, axis=-1)
  return output
def sample_from_discretized_mix_logistic(pred, seed=None):
  """Sampling from a discretized mixture of logistics.

  Args:
    pred: A [batch, height, width, num_mixtures*10] tensor of floats
      comprising one unconstrained mixture probability, three means
      (one per channel), three standard deviations (one per channel),
      and three coefficients which linearly parameterize dependence across
      channels.
    seed: Random seed.

  Returns:
    A tensor of shape [batch, height, width, 3] with real intensities scaled
    between -1 and 1.
  """

  logits, locs, log_scales, coeffs = split_to_discretized_mix_logistic_params(
      pred)

  # Sample mixture indicator given logits using the gumbel max trick.
  num_mixtures = shape_list(logits)[-1]
  gumbel_noise = -tf.log(-tf.log(
      tf.random_uniform(
          tf.shape(logits), minval=1e-5, maxval=1. - 1e-5, seed=seed)))
  sel = tf.one_hot(
      tf.argmax(logits + gumbel_noise, -1),
      depth=num_mixtures,
      dtype=tf.float32)

  # Select mixture component's parameters.
  sel = tf.expand_dims(sel, -1)
  locs = tf.reduce_sum(locs * sel, 3)
  log_scales = tf.reduce_sum(log_scales * sel, 3)
  coeffs = tf.reduce_sum(coeffs * sel, 3)

  # Sample from 3-D logistic & clip to interval. Note we don't round to the
  # nearest 8-bit value when sampling.
  uniform_noise = tf.random_uniform(
      tf.shape(locs), minval=1e-5, maxval=1. - 1e-5, seed=seed)
  # Inverse-CDF (logit) transform of uniform noise gives logistic noise.
  logistic_noise = tf.log(uniform_noise) - tf.log1p(-uniform_noise)
  x = locs + tf.exp(log_scales) * logistic_noise
  # Channels are autoregressive: later channels depend linearly on earlier.
  x0 = x[..., 0]
  x1 = x[..., 1] + coeffs[..., 0] * x0
  x2 = x[..., 2] + coeffs[..., 1] * x0 + coeffs[..., 2] * x1
  x = tf.stack([x0, x1, x2], axis=-1)
  x = tf.clip_by_value(x, -1., 1.)
  return x
def smoothing_cross_entropy(logits,
                            labels,
                            vocab_size,
                            confidence,
                            gaussian=False):
  """Cross entropy with label smoothing to limit over-confidence.

  Args:
    logits: Tensor of shape [batch_size, ?, ?, ?, vocab_size].
    labels: Tensor of shape [batch_size, ?, ?, ?].
    vocab_size: Tensor representing the size of the vocabulary.
    confidence: Used to determine on and off values for label smoothing.
      If `gaussian` is true, `confidence` is the variance to the Gaussian
      distribution.
    gaussian: Uses a Gaussian distribution for label smoothing

  Returns:
    Tensor of shape [batch_size, ?, ?, ?].
  """
  with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
    # Low confidence is given to all non-true labels, uniformly.
    low_confidence = (1.0 - confidence) / to_float(vocab_size - 1)
    # Normalizing constant is the best cross-entropy value with soft targets.
    # We subtract it just for readability, makes no difference on learning.
    normalizing = -(
        confidence * tf.log(confidence) + to_float(vocab_size - 1) *
        low_confidence * tf.log(low_confidence + 1e-20))

    if gaussian and confidence > 0.0:
      labels = tf.cast(labels, tf.float32)

      normal_dist = tfp.distributions.Normal(loc=labels, scale=confidence)
      # Locations to evaluate the probability distributions.
      soft_targets = normal_dist.prob(
          tf.cast(tf.range(vocab_size), tf.float32)[:, None, None, None, None])
      # Reordering soft_targets from [vocab_size, batch_size, ?, ?, ?] to match
      # logits: [batch_size, ?, ?, ?, vocab_size]
      soft_targets = tf.transpose(soft_targets, perm=[1, 2, 3, 4, 0])
    else:
      soft_targets = tf.one_hot(
          tf.cast(labels, tf.int32),
          depth=vocab_size,
          on_value=confidence,
          off_value=low_confidence)
    xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
        logits=logits, labels=soft_targets)
    return xentropy - normalizing
def global_pool_1d(inputs, pooling_type="MAX", mask=None):
  """Pool elements across the last dimension.

  Useful to convert a list of vectors into a single vector so as
  to get a representation of a set.

  Args:
    inputs: A tensor of shape [batch_size, sequence_length, input_dims]
      containing the sequences of input vectors.
    pooling_type: the pooling type to use, MAX or AVR
    mask: A tensor of shape [batch_size, sequence_length] containing a
      mask for the inputs with 1's for existing elements, and 0's elsewhere.

  Returns:
    A tensor of shape [batch_size, input_dims] containing the sequences of
    transformed vectors.
  """
  with tf.name_scope("global_pool", values=[inputs]):
    if mask is not None:
      # Zero out masked positions so they don't contribute to the pool.
      mask = tf.expand_dims(mask, axis=2)
      inputs = tf.multiply(inputs, mask)

    if pooling_type == "MAX":
      # A tf.pool can be used here, but reduce is cleaner
      output = tf.reduce_max(inputs, axis=1)
    elif pooling_type == "AVR":
      if mask is not None:
        # Some elems are dummy elems so we can't just reduce the average.
        output = tf.reduce_sum(inputs, axis=1)
        num_elems = tf.reduce_sum(mask, axis=1, keepdims=True)
        # Guard against all-masked sequences (division by zero).
        output = tf.div(output, tf.maximum(num_elems, 1))
      else:
        output = tf.reduce_mean(inputs, axis=1)

  return output
def running_global_pool_1d(inputs, pooling_type="MAX"):
  """Same global pool, but only for the elements up to the current element.

  Useful for outputs where the state of future elements is not known.
  Takes no mask as all elements up to the current element are assumed to exist.
  Currently only supports maximum. Equivalent to using a lower triangle bias.

  Args:
    inputs: A tensor of shape [batch_size, sequence_length, input_dims]
      containing the sequences of input vectors.
    pooling_type: Pooling type to use. Currently only supports 'MAX'.

  Returns:
    A tensor of shape [batch_size, sequence_length, input_dims] containing the
    running 'totals'.
  """
  del pooling_type  # Only "MAX" is implemented.
  with tf.name_scope("running_global_pool", values=[inputs]):
    # tf.scan runs over axis 0, so go time-major, scan a running elementwise
    # maximum over the sequence, then restore batch-major layout.
    time_major = tf.transpose(inputs, [1, 0, 2])
    running_max = tf.scan(tf.maximum, time_major, swap_memory=True)
    return tf.transpose(running_max, [1, 0, 2])
def gated_linear_unit_layer(x, name=None):
  """Gated linear unit layer.

  Computes x = Wx * sigmoid(W'x), as in "Language Modeling with Gated
  Convolutional Networks" (https://arxiv.org/abs/1612.08083).

  Args:
    x: A tensor
    name: A string

  Returns:
    A tensor of the same shape as x.
  """
  with tf.variable_scope(name, default_name="glu_layer", values=[x]):
    depth = shape_list(x)[-1]
    # One dense layer produces both the values and the gates, then the
    # result is split in half along the channel axis.
    projected = layers().Dense(depth * 2, activation=None)(x)
    values, gates = tf.split(projected, 2, axis=-1)
    return values * tf.nn.sigmoid(gates)
def sru_with_scan(x,
                  num_layers=2,
                  activation=None,
                  initial_state=None,
                  name=None,
                  reuse=None):
  """SRU cell as in https://arxiv.org/abs/1709.02755.

  This implementation uses tf.scan and can incur overhead, see the full SRU
  function doc for details and an implementation that is sometimes faster.

  Args:
    x: A tensor of shape [batch, ..., channels] ; ... is treated as time.
    num_layers: How many SRU layers; default is 2 as results for 1 disappoint.
    activation: Optional activation function, try tf.nn.tanh or tf.nn.relu.
    initial_state: Optional initial c-state, set to zeros if None.
    name: Optional name, "sru" by default.
    reuse: Optional reuse.

  Returns:
    A tensor of the same shape as x.

  Raises:
    ValueError: if num_layers is not positive.
  """
  if num_layers < 1:
    raise ValueError("Number of layers must be positive: %d" % num_layers)
  with tf.variable_scope(name, default_name="sru", values=[x], reuse=reuse):
    # We assume x is [batch, ..., channels] and treat all ... as time.
    x_shape = shape_list(x)
    x = tf.reshape(x, [x_shape[0], -1, x_shape[-1]])
    x = tf.transpose(x, [1, 0, 2])  # Scan assumes time on axis 0.
    # Explicit None check: `initial_state or ...` would invoke
    # Tensor.__bool__, which raises TypeError for graph-mode Tensors, so a
    # caller-supplied initial state would crash.
    if initial_state is None:
      initial_state = tf.zeros([x_shape[0], x_shape[-1]])
    # SRU state recurrence: c_t = f_t * c_{t-1} + (1 - f_t) * x'_t.
    def next_state(cur_state, args_tup):
      cur_x_times_one_minus_f, cur_f = args_tup
      return cur_f * cur_state + cur_x_times_one_minus_f
    # Calculate SRU on each layer.
    for i in range(num_layers):
      # The parallel part of the SRU: one matmul yields x', f, r.
      x_orig = x
      x, f, r = tf.split(
          layers().Dense(3 * x_shape[-1], name="kernel_%d" % i)(x), 3, axis=-1)
      f, r = tf.sigmoid(f), tf.sigmoid(r)
      x_times_one_minus_f = x * (1.0 - f)  # Compute in parallel for speed.
      # Calculate states sequentially with tf.scan.
      c_states = tf.scan(
          next_state, (x_times_one_minus_f, f),
          initializer=initial_state,
          parallel_iterations=2,
          name="scan_%d" % i)
      # Final output: h_t = r_t * act(c_t) + (1 - r_t) * x_t.
      if activation is not None:
        c_states = activation(c_states)
      h = c_states * r + (1.0 - r) * x_orig
      x = h  # Next layer.
    # Transpose back to batch-major.
    x = tf.transpose(x, [1, 0, 2])
    return tf.reshape(x, x_shape)
class CumsumprodCell(object):
  """Cumulative sum and product object for use with functional_rnn API."""
  def __init__(self, initializer):
    # Initial c-state; its last dimension defines the cell's output size.
    self._initializer = initializer
  @property
  def output_size(self):
    return int(shape_list(self._initializer)[-1])
  def zero_state(self, batch_size, dtype):
    dtype = dtype or tf.float32
    return tf.zeros([batch_size, self.output_size], dtype=dtype)
  def __call__(self, inputs_t, state_t):
    # inputs_t packs [x_t * (1 - f_t), f_t] along the last axis; the update
    # is the SRU recurrence c_t = f_t * c_{t-1} + x_t * (1 - f_t).
    cur_x_times_one_minus_f, cur_f = tf.split(inputs_t, 2, axis=-1)
    state_next = cur_f * state_t + cur_x_times_one_minus_f
    # The cell's output at each step is simply the new state.
    outputs_t = state_next
    return outputs_t, state_next
def sru(x,
        num_layers=2,
        activation=None,
        initial_state=None,
        name=None,
        reuse=None):
  """SRU cell as in https://arxiv.org/abs/1709.02755.

  As defined in the paper:
  (1) x'_t = W x_t
  (2) f_t = sigmoid(Wf x_t + bf)
  (3) r_t = sigmoid(Wr x_t + br)
  (4) c_t = f_t * c_{t-1} + (1 - f_t) * x'_t
  (5) h_t = r_t * activation(c_t) + (1 - r_t) * x_t

  This version uses functional ops to be faster on GPUs with TF-1.9+.

  Args:
    x: A tensor of shape [batch, ..., channels] ; ... is treated as time.
    num_layers: How many SRU layers; default is 2 as results for 1 disappoint.
    activation: Optional activation function, try tf.nn.tanh or tf.nn.relu.
    initial_state: Optional initial c-state, set to zeros if None.
    name: Optional name, "sru" by default.
    reuse: Optional reuse.

  Returns:
    A tensor of the same shape as x.

  Raises:
    ValueError: if num_layers is not positive.
  """
  if num_layers < 1:
    raise ValueError("Number of layers must be positive: %d" % num_layers)
  if is_xla_compiled():  # On TPU the XLA does a good job with while.
    return sru_with_scan(x, num_layers, activation, initial_state, name, reuse)
  try:
    from tensorflow.contrib.recurrent.python.ops import functional_rnn  # pylint: disable=g-import-not-at-top
  except ImportError:
    tf.logging.info("functional_rnn not found, using sru_with_scan instead")
    return sru_with_scan(x, num_layers, activation, initial_state, name, reuse)
  with tf.variable_scope(name, default_name="sru", values=[x], reuse=reuse):
    # We assume x is [batch, ..., channels] and treat all ... as time.
    x_shape = shape_list(x)
    x = tf.reshape(x, [x_shape[0], -1, x_shape[-1]])
    # Explicit None check: `initial_state or ...` would invoke
    # Tensor.__bool__, which raises TypeError for graph-mode Tensors, so a
    # caller-supplied initial state would crash.
    if initial_state is None:
      initial_state = tf.zeros([x_shape[0], x_shape[-1]])
    cell = CumsumprodCell(initial_state)
    # Calculate SRU on each layer.
    for i in range(num_layers):
      # The parallel part of the SRU: one matmul yields x', f, r.
      x_orig = x
      x, f, r = tf.split(
          layers().Dense(3 * x_shape[-1], name="kernel_%d" % i)(x), 3, axis=-1)
      f, r = tf.sigmoid(f), tf.sigmoid(r)
      x_times_one_minus_f = x * (1.0 - f)  # Compute in parallel for speed.
      # Calculate states via the functional-ops RNN (faster than tf.scan).
      concat = tf.concat([x_times_one_minus_f, f], axis=-1)
      c_states, _ = functional_rnn.functional_rnn(
          cell, concat, time_major=False)
      # Final output: h_t = r_t * act(c_t) + (1 - r_t) * x_t.
      if activation is not None:
        c_states = activation(c_states)
      h = c_states * r + (1.0 - r) * x_orig
      x = h  # Next layer.
    return tf.reshape(x, x_shape)
def linear_set_layer(layer_size,
                     inputs,
                     context=None,
                     activation_fn=tf.nn.relu,
                     dropout=0.0,
                     name=None):
  """Basic layer type for doing funky things with sets.

  Applies the same linear transformation to each element in the input set.
  If a context is supplied, it is (effectively) concatenated with the inputs.
  e.g. One can use global_pool_1d to get a representation of the set which
  can then be used as the context for the next layer.

  TODO: Add bias add (or control the biases used).

  Args:
    layer_size: Dimension to transform the input vectors to.
    inputs: A tensor of shape [batch_size, sequence_length, input_dims]
      containing the sequences of input vectors.
    context: A tensor of shape [batch_size, context_dims] containing a global
      statistic about the set.
    activation_fn: The activation function to use.
    dropout: Dropout probability.
    name: name.

  Returns:
    Tensor of shape [batch_size, sequence_length, output_dims] containing the
    sequences of transformed vectors.
  """
  with tf.variable_scope(
      name, default_name="linear_set_layer", values=[inputs]):
    # A kernel-size-1 conv applies the same linear map to every set element.
    outputs = conv1d(inputs, layer_size, 1, activation=None, name="set_conv")
    if context is not None:
      # tf cannot broadcast via concat; transforming the context separately
      # and adding it yields the same result as concatenation would.
      if len(context.get_shape().as_list()) == 2:
        context = tf.expand_dims(context, axis=1)
      outputs += conv1d(
          context, layer_size, 1, activation=None, name="cont_conv")
    if activation_fn is not None:
      outputs = activation_fn(outputs)
    if dropout != 0.0:
      outputs = tf.nn.dropout(outputs, 1.0 - dropout)
    return outputs
def ravanbakhsh_set_layer(layer_size,
                          inputs,
                          mask=None,
                          sequential=False,
                          activation_fn=tf.nn.tanh,
                          dropout=0.0,
                          name=None):
  """Layer from Deep Sets paper: https://arxiv.org/abs/1611.04500 .

  More parameter-efficient version of a linear-set-layer with context:
  subtracts the pooled set representation from each element instead of
  feeding it through a separate projection.

  Args:
    layer_size: Dimension to transform the input vectors to.
    inputs: A tensor of shape [batch_size, sequence_length, vector]
      containing the sequences of input vectors.
    mask: A tensor of shape [batch_size, sequence_length] containing a
      mask for the inputs with 1's for existing elements, and 0's elsewhere.
    sequential: If true, will use a running global pool so each element will
      only depend on those before it. Set true if this layer is being used in
      an output sequence.
    activation_fn: The activation function to use.
    dropout: dropout.
    name: name.

  Returns:
    Tensor of shape [batch_size, sequence_length, vector] containing the
    sequences of transformed vectors.
  """
  del dropout  # Unused.
  with tf.variable_scope(name, "ravanbakhsh_set_layer", [inputs]):
    if sequential:
      pooled = running_global_pool_1d(inputs)
    else:
      pooled = tf.expand_dims(global_pool_1d(inputs, mask=mask), axis=1)
    return linear_set_layer(
        layer_size,
        inputs - pooled,
        activation_fn=activation_fn,
        name=name)
def fn_device_dependency_dict():
  """State container for fn_device_dependency, stored on the default graph."""
  graph = tf.get_default_graph()
  try:
    return graph.dependency_dict
  except AttributeError:
    # First use on this graph: attach a fresh key -> [ops] mapping.
    graph.dependency_dict = collections.defaultdict(list)
    return graph.dependency_dict
@contextlib.contextmanager
def fn_device_dependency(name, device=""):
  """Add control deps for name and device."""
  key = name + "_" + device
  outs = []
  # body() is a generator; contextmanager works with the generator this
  # function *returns*, so the `yield outs` below is what the caller's
  # `with` block receives. The caller is expected to append its output
  # tensors to `outs` before the block exits.
  def body():
    with tf.control_dependencies(fn_device_dependency_dict()[key]):
      yield outs
      assert outs
    # Record this call's outputs as the control deps for the next call
    # under the same (name, device) key.
    deps = outs
    if isinstance(outs[0], (list, tuple)):
      assert len(outs) == 1
      deps = outs[0]
    fn_device_dependency_dict()[key] = deps
  if device:
    with tf.device(device):
      return body()
  else:
    return body()
def underlying_variable_ref(t):
  """Find the underlying variable ref.

  Traverses through Identity, ReadVariableOp, and Enter ops.
  Stops when op type has Variable or VarHandle in name.

  Args:
    t: a Tensor

  Returns:
    a Tensor that is a variable ref, or None on error.
  """
  # Walk back through wrapper ops to the op that produced the value.
  while t.op.type in ("Identity", "ReadVariableOp", "Enter"):
    t = t.op.inputs[0]
  producer_type = t.op.type
  if "Variable" in producer_type or "VarHandle" in producer_type:
    return t
  return None
def underlying_variable(t):
  """Find the underlying tf.Variable object.

  Args:
    t: a Tensor

  Returns:
    tf.Variable.
  """
  ref = underlying_variable_ref(t)
  assert ref is not None
  # Maintain a lazily-built name -> Variable index on the default graph,
  # extending it with any variables created since the previous call.
  graph = tf.get_default_graph()
  if not hasattr(graph, "var_index"):
    graph.var_index = {}
  index = graph.var_index
  for v in tf.global_variables()[len(index):]:
    index[v.name] = v
  return index[ref.name]
def approximate_split(x, num_splits, axis=0):
  """Split approximately equally into num_splits parts.

  Args:
    x: a Tensor
    num_splits: an integer
    axis: an integer.

  Returns:
    a list of num_splits Tensors.
  """
  total = shape_list(x)[axis]
  # Piece sizes are floor((total + i) / num_splits) for i in [0, num_splits);
  # these always sum to exactly `total`.
  piece_sizes = [tf.div(total + i, num_splits) for i in range(num_splits)]
  return tf.split(x, piece_sizes, axis=axis)
class FactoredTensor(object):
  """A concise factored representation of Tensor as two tensors.
  This class represents the tensor tf.matmul(a, b, transpose_b=True)
  by storing the values of Tensors a and b.
  The reason for this is that the product may be too big to fully realize at
  once, so it can be realized a part at a time.
  "a" may have extra leading dimensions, in which case they are flattened out
  before computing the matrix product, then re-expanded afterwards.
  """
  def __init__(self, a, b):
    # a: [..., inner_dim]; b: [result_dim, inner_dim].
    self._a = a
    self._b = b
  @property
  def a(self):
    return self._a
  @property
  def b(self):
    return self._b
  def to_tensor(self):
    """Convert to Tensor by materializing tf.matmul(a, b, transpose_b=True)."""
    a_shape = shape_list(self.a)
    b_shape = shape_list(self.b)
    inner_dim = b_shape[1]
    result_dim = b_shape[0]
    # Flatten any extra leading dims of `a` into one batch dim for the matmul.
    flat_a = tf.reshape(self.a, [-1, inner_dim])
    product = tf.matmul(flat_a, self.b, transpose_b=True)
    # Re-expand to the original leading dims plus the result dim.
    product_shape = a_shape[:-1] + [result_dim]
    product = tf.reshape(product, product_shape)
    # Restore static shape information lost by the dynamic reshape.
    product.set_shape(self.a.get_shape().as_list()[:-1] +
                      [self.b.get_shape()[0]])
    return product
def _convert_factored_tensor_to_tensor(value, *args, **kwargs):
  # call ops.convert_to_tensor to handle optional arguments appropriately
  return ops.convert_to_tensor(value.to_tensor(), *args, **kwargs)
# Let a FactoredTensor be passed wherever TF expects a Tensor; it will be
# materialized via to_tensor() at conversion time.
tf.register_tensor_conversion_function(FactoredTensor,
                                       _convert_factored_tensor_to_tensor)
def smoothing_cross_entropy_factored_grad(op, dy):
  """Gradient function for smoothing_cross_entropy_factored.

  Recomputes the forward pass in chunks so that only one chunk's logits
  are live at a time, bounding peak memory.

  Args:
    op: the forward op; op.inputs are (a, b, labels, confidence).
    dy: gradient with respect to the op's output.

  Returns:
    Gradients for (a, b, labels, confidence); the last two are None as
    they are not differentiable.
  """
  a = op.inputs[0]
  b = op.inputs[1]
  labels = op.inputs[2]
  confidence = op.inputs[3]
  num_splits = 16
  vocab_size = shape_list(b)[0]
  labels = approximate_split(labels, num_splits)
  a = approximate_split(a, num_splits)
  dy = approximate_split(dy, num_splits)
  b_grad = None
  a_grad_parts = []
  deps = []
  for part in range(num_splits):
    # Control deps serialize the chunks so their activations never coexist.
    with tf.control_dependencies(deps):
      logits = tf.matmul(a[part], b, transpose_b=True)
      output_part = smoothing_cross_entropy(logits, labels[part], vocab_size,
                                            confidence)
      a_grad_part, b_grad_part = tf.gradients(
          ys=[output_part], xs=[a[part], b], grad_ys=[dy[part]])
      a_grad_parts.append(a_grad_part)
      # b's gradient is accumulated across chunks; a's is concatenated.
      if part > 0:
        b_grad += b_grad_part
      else:
        b_grad = b_grad_part
      deps = [b_grad, a_grad_part]
  a_grad = tf.concat(a_grad_parts, 0)
  return a_grad, b_grad, None, None
@function.Defun(
    noinline=True,
    python_grad_func=smoothing_cross_entropy_factored_grad,
    compiled=True,
    separate_compiled_gradients=True)
def smoothing_cross_entropy_factored(a, b, labels, confidence):
  """Memory-efficient computation of smoothing cross-entropy.

  Avoids realizing the entire logits matrix at once.

  Args:
    a: a Tensor with shape [batch, inner_dim]
    b: a Tensor with shape [vocab_size, inner_dim]
    labels: an integer Tensor with shape [batch]
    confidence: a float

  Returns:
    A Tensor with shape [batch]
  """
  num_splits = 16
  vocab_size = shape_list(b)[0]
  labels = approximate_split(labels, num_splits)
  a = approximate_split(a, num_splits)
  parts = []
  for part in range(num_splits):
    # Depending on the previous part serializes the chunks, so only one
    # chunk's [batch/num_splits, vocab] logits matrix is live at a time.
    with tf.control_dependencies(parts[-1:]):
      logits = tf.matmul(a[part], b, transpose_b=True)
      parts.append(
          smoothing_cross_entropy(logits, labels[part], vocab_size, confidence))
  return tf.concat(parts, 0)
def padded_cross_entropy_factored(factored_logits,
                                  labels,
                                  label_smoothing,
                                  weights_fn=weights_nonzero,
                                  reduce_sum=True):
  """Memory-efficient computation of smoothing cross-entropy.
  Avoids realizing the entire logits matrix at once.
  Args:
    factored_logits: a `FactoredTensor` representing a Tensor
       with shape `[batch, timesteps, vocab_size]`.
    labels: an integer `Tensor` with shape `[batch, timesteps]`.
    label_smoothing: a floating point `Scalar`.
    weights_fn: A function from labels to weights.
    reduce_sum: a Boolean, whether to sum at the end or not.
  Returns:
    loss_numerator: a `Scalar`.  Sum of losses.
    loss_denominator: a `Scalar.  The number of non-padding target tokens.
  """
  a = factored_logits.a
  b = factored_logits.b
  confidence = 1.0 - label_smoothing
  with tf.name_scope("padded_cross_entropy_factored", values=[a, b, labels]):
    # Fold batch and time together; the factored kernel works on 2-D `a`.
    labels_flat = tf.reshape(labels, [-1])
    a_flat = tf.reshape(a, [-1, shape_list(b)[1]])
    xent = smoothing_cross_entropy_factored(a_flat, b, labels_flat,
                                            tf.convert_to_tensor(confidence))
    xent = tf.reshape(xent, shape_list(labels))
    # Padding positions get weight 0 via weights_fn.
    weights = weights_fn(labels)
    if not reduce_sum:
      return xent * weights, weights
    return tf.reduce_sum(xent * weights), tf.reduce_sum(weights)
def fn_with_custom_grad(grad_fn, use_global_vars=False):
  """Decorator to create a subgraph with a custom gradient function.

  The subgraph created by the decorated function is NOT put in a Defun and so
  does not suffer from the limitations of the Defun (all subgraph ops on the
  same device, no summaries).

  Args:
    grad_fn: function with signature
      (inputs, variables, outputs, output_grads) -> (grad_inputs, grad_vars),
      all of which are lists of Tensors.
    use_global_vars: if True, variables will be the global variables created.
      If False, will be the trainable variables.

  Returns:
    Decorator for function such that the gradient is defined by grad_fn.
  """
  def decorator(fn):
    # functools.wraps keeps the decorated function's name and docstring.
    @functools.wraps(fn)
    def wrapped(*args):
      return _fn_with_custom_grad(
          fn, args, grad_fn, use_global_vars=use_global_vars)
    return wrapped
  return decorator
def _fn_with_custom_grad(fn, inputs, grad_fn, use_global_vars=False):
  """Create a subgraph with a custom gradient.
  Args:
    fn: function that takes inputs as arguments and produces 1 or more Tensors.
    inputs: list<Tensor>, will be passed as fn(*inputs).
    grad_fn: function with signature
      (inputs, vars, outputs, output_grads) -> (grad_inputs, grad_vars),
      all of which are lists of Tensors.
    use_global_vars: if True, variables will be the global variables created.
      If False, will be the trainable variables.
  Returns:
    fn(*inputs)
  """
  vs = tf.compat.v1.get_variable_scope()
  get_vars_fn = (
      vs.global_variables if use_global_vars else vs.trainable_variables)
  # Snapshot the variable count so that only variables created by fn(*inputs)
  # below are captured as this subgraph's train_vars.
  len_before_vars = len(get_vars_fn())
  inputs = list(inputs)
  outputs = fn(*inputs)
  train_vars = get_vars_fn()[len_before_vars:]
  if grad_fn is None:
    return outputs
  if not isinstance(outputs, (tuple, list)):
    outputs = [outputs]
  outputs = list(outputs)
  defun_inputs = [inputs, train_vars, outputs]
  def custom_grad_fn(op, *dys):
    """Custom grad fn applying grad_fn for identity Defun."""
    fn_inputs, fn_vars, fn_outputs = tf.contrib.framework.nest.pack_sequence_as(
        defun_inputs, list(op.inputs))
    dys = list(dys)
    assert len(fn_outputs) == len(outputs)
    assert len(fn_outputs) == len(dys)
    grad_inputs, grad_vars = grad_fn(fn_inputs, fn_vars, fn_outputs, dys)
    # The outputs were also Defun inputs (identity pass-through); they get
    # no gradient of their own.
    grad_outputs = [None] * len(fn_outputs)
    return tuple(grad_inputs + grad_vars + grad_outputs)
  # The Defun takes as input the original inputs, the trainable variables
  # created in fn, and the outputs. In the forward it passes through the
  # outputs. In the backwards, it produces gradients for the original inputs
  # and the trainable variables.
  in_types = [t.dtype for t in inputs]
  out_types = [t.dtype for t in outputs]
  var_types = [t.dtype for t in train_vars]
  @function.Defun(
      *(in_types + var_types + out_types),
      func_name="identity_custom_grad%d" % ops.uid(),
      python_grad_func=custom_grad_fn,
      shape_func=lambda _: [t.get_shape() for t in outputs])
  def identity(*args):
    _, _, outs = tf.contrib.framework.nest.pack_sequence_as(defun_inputs, args)
    return tuple([tf.identity(t) for t in outs])
  flat_inputs = tf.contrib.framework.nest.flatten(defun_inputs)
  id_out = identity(*flat_inputs)
  return id_out
# Cache of compiled forward Defuns, keyed by epsilon (see below); Defun
# construction is expensive, so reuse across calls.
_function_cache = {}
def conv_hidden_relu_memory_efficient(x,
                                      filter_size,
                                      epsilon=1e-6,
                                      forget=True,
                                      test_vars=None,
                                      name=None):
  """LayerNorm, Conv, ReLU, Conv.

  All convolutions have kernel size 1.

  returns conv(relu(conv(layer_norm(x))))

  Args:
    x: input Tensor with shape [batch, length, io_size]
    filter_size: an integer - size of the hidden layer.
    epsilon: a float (for layer norm)
    forget: a boolean - forget forwards activations and recompute on backprop
    test_vars: optional tuple of variables for testing purposes
    name: an optional string

  Returns:
    a Tensor with shape [batch, length, io_size]
  """
  io_size = x.get_shape().as_list()[-1]
  def forward_internal(x, f1, f2, scale, bias):
    """Forward function."""
    # split batch-wise to avoid exhausting memory in case the batch is large
    # and the hidden layer is large.
    num_splits = 4
    x_flat = tf.reshape(x, [-1, 1, shape_list(x)[2]])
    xs = approximate_split(x_flat, num_splits)
    ys = []
    for i in range(num_splits):
      # Control deps serialize the chunks to bound peak memory.
      with tf.control_dependencies(ys[-1:]):
        n = layer_norm_compute(xs[i], epsilon, scale, bias)
        y = tf.nn.conv1d(n, f1, 1, "SAME")
        y = tf.nn.relu(y)
        y = tf.nn.conv1d(y, f2, 1, "SAME")
        ys.append(y)
    y = tf.concat(ys, 0)
    y = tf.reshape(y, shape_list(x))
    return y
  key = ("conv_hidden_relu_memory_efficient %s" % epsilon)
  if not forget:
    forward_fn = forward_internal
  elif key in _function_cache:
    forward_fn = _function_cache[key]
  else:
    @function.Defun(compiled=True)
    def grad_fn(x, f1, f2, scale, bias, dy):
      """Gradient for efficiency: recompute activations chunk by chunk."""
      with tf.control_dependencies([dy]):
        num_splits = 4
        x_shape = shape_list(x)
        flat_shape = [-1, 1, x_shape[2]]
        x = tf.reshape(x, flat_shape)
        dy = tf.reshape(dy, flat_shape)
        xs = approximate_split(x, num_splits)
        dys = approximate_split(dy, num_splits)
        dxs = []
        df1 = 0
        df2 = 0
        dscale = 0
        dbias = 0
        deps = []
        for i in range(num_splits):
          with tf.control_dependencies(deps):
            # Recompute this chunk's forward pass, then differentiate it.
            n = layer_norm_compute(xs[i], epsilon, scale, bias)
            y = tf.nn.conv1d(n, f1, 1, "SAME")
            y = tf.nn.relu(y)
            y = tf.nn.conv1d(y, f2, 1, "SAME")
            dxi, pdf1, pdf2, pdscale, pdbias = tf.gradients(
                ys=[y], xs=[xs[i], f1, f2, scale, bias], grad_ys=[dys[i]])
            df1 += pdf1
            df2 += pdf2
            dscale += pdscale
            dbias += pdbias
            dxs.append(dxi)
            deps = [dxi, df1, df2, dscale, dbias]
        with tf.control_dependencies(deps):
          dx = tf.concat(dxs, 0)
          dx = tf.reshape(dx, x_shape)
          return dx, df1, df2, dscale, dbias
    @function.Defun(
        grad_func=grad_fn, compiled=True, separate_compiled_gradients=True)
    def forward_fn(x, f1, f2, scale, bias):
      return forward_internal(x, f1, f2, scale, bias)
    # Store the compiled function; previously this was missing, so the cache
    # lookup above could never hit and a new Defun was built on every call.
    _function_cache[key] = forward_fn
  with tf.variable_scope(name, default_name="ffn2", values=[x]):
    # TODO(noam): it would be nice to save memory by casting x to float16
    # here, but this causes problems with the gradients.  Figure out if there
    # is a way to leave the gradients as float32.
    if test_vars is not None:
      f1, f2, scale, bias = list(test_vars)
    else:
      f1 = tf.get_variable("f1", [1, io_size, filter_size])
      f2 = tf.get_variable("f2", [1, filter_size, io_size])
      scale, bias = layer_norm_vars(io_size)
    if forget:
      y = forward_fn(x, f1, f2, scale, bias)
    else:
      y = forward_internal(x, f1, f2, scale, bias)
    y.set_shape(x.get_shape())
    return y
def shape_list(x):
  """Return list of dims, statically where possible."""
  x = tf.convert_to_tensor(x)
  # Unknown rank: only the fully dynamic shape is available.
  if x.get_shape().dims is None:
    return tf.shape(x)
  dynamic = tf.shape(x)
  # Prefer the static size for each axis, falling back to the dynamic one.
  return [
      size if size is not None else dynamic[i]
      for i, size in enumerate(x.get_shape().as_list())
  ]
def list_product(els):
  """Multiply together all elements of a sequence.

  Args:
    els: an iterable of elements supporting `*` (ints, floats, Tensors, ...).

  Returns:
    The product of the elements; 1 for an empty sequence (previously this
    raised IndexError on empty input).
  """
  prod = 1
  for el in els:
    prod *= el
  return prod
def sample_with_temperature(logits, temperature, sampling_keep_top_k=-1):
  """Either argmax or random sampling.
  Args:
    logits: a Tensor.
    temperature: a float  0.0=argmax 1.0=random
    sampling_keep_top_k: If not -1, only sample from the top k logits.
  Returns:
    a Tensor with one fewer dimension than logits.
  """
  if temperature == 0.0:
    # TF argmax doesn't handle >5 dimensions, so we reshape here.
    logits_shape = shape_list(logits)
    argmax = tf.argmax(tf.reshape(logits, [-1, logits_shape[-1]]), axis=1)
    return tf.reshape(argmax, logits_shape[:-1])
  else:
    assert temperature > 0.0
    if sampling_keep_top_k != -1:
      if sampling_keep_top_k <= 0:
        raise ValueError("sampling_keep_top_k must either be -1 or positive.")
      # NOTE(review): this top-k path reads shape_list(logits)[1] as the
      # vocab size, i.e. it assumes logits is 2-D [batch, vocab] — verify
      # against callers before using with higher-rank logits.
      vocab_size = shape_list(logits)[1]
      k_largest = tf.contrib.nn.nth_element(
          logits, n=sampling_keep_top_k, reverse=True)
      k_largest = tf.tile(tf.reshape(k_largest, [-1, 1]), [1, vocab_size])
      # Force every position that is not in the top k to have probability near
      # 0 by setting the logit to be very negative.
      logits = tf.where(tf.less_equal(logits, k_largest),
                        tf.ones_like(logits)*-1e6, logits)
    # Flatten to 2-D for tf.multinomial, then restore the leading dims.
    reshaped_logits = (
        tf.reshape(logits, [-1, shape_list(logits)[-1]]) / temperature)
    choices = tf.multinomial(reshaped_logits, 1)
    choices = tf.reshape(choices,
                         shape_list(logits)[:logits.get_shape().ndims - 1])
    return choices
def ones_matrix_band_part(rows, cols, num_lower, num_upper, out_shape=None):
  """Matrix band part of ones.
  Args:
    rows: int determining number of rows in output
    cols: int
    num_lower: int, maximum distance backward. Negative values indicate
      unlimited.
    num_upper: int, maximum distance forward. Negative values indicate
      unlimited.
    out_shape: shape to reshape output by.
  Returns:
    Tensor of size rows * cols reshaped into shape out_shape.
  """
  if all([isinstance(el, int) for el in [rows, cols, num_lower, num_upper]]):
    # Needed info is constant, so we construct in numpy
    if num_lower < 0:
      num_lower = rows - 1
    if num_upper < 0:
      num_upper = cols - 1
    # np.tri has no "upper triangle" form, so the lower band is built
    # transposed (cols x rows) and transposed back.
    lower_mask = np.tri(cols, rows, num_lower).T
    upper_mask = np.tri(rows, cols, num_upper)
    band = np.ones((rows, cols)) * lower_mask * upper_mask
    if out_shape:
      band = band.reshape(out_shape)
    band = tf.constant(band, tf.float32)
  else:
    # Some argument is a Tensor: fall back to the TF op.
    band = tf.linalg.band_part(
        tf.ones([rows, cols]), tf.cast(num_lower, tf.int64),
        tf.cast(num_upper, tf.int64))
    if out_shape:
      band = tf.reshape(band, out_shape)
  return band
def reshape_like_all_dims(a, b):
  """Reshapes a to match the shape of b."""
  reshaped = tf.reshape(a, tf.shape(b))
  if tf.executing_eagerly():
    return reshaped
  # Graph mode: propagate b's static shape information.
  reshaped.set_shape(b.get_shape())
  return reshaped
def recompute_grad(fn):
  """Decorator that recomputes the function on the backwards pass.

  Args:
    fn: a function that takes Tensors (all as positional arguments) and returns
      a tuple of Tensors.

  Returns:
    A wrapped fn that is identical to fn when called, but its activations will
    be discarded and recomputed on the backwards pass (i.e. on a call to
    tf.gradients).
  """
  @functools.wraps(fn)
  def inner(*args):
    return _recompute_grad(fn, args)
  return inner
def _recompute_grad(fn, args):
  """See recompute_grad."""
  # The variable scope and arg scope active during the forward pass are
  # captured here so the recomputation in grad_fn reuses the same variables.
  cached_vs = []
  cached_arg_scope = []
  def grad_fn(inputs, variables, outputs, output_grads):
    """Recompute outputs for gradient computation."""
    del outputs
    variables = [underlying_variable_ref(v) for v in variables]
    # Recompute outputs
    with tf.control_dependencies(output_grads):
      with tf.contrib.framework.arg_scope(cached_arg_scope[0]):
        with tf.variable_scope(cached_vs[0], reuse=True):
          outputs = fn(*inputs)
    if not isinstance(outputs, (list, tuple)):
      outputs = [outputs]
    outputs = list(outputs)
    grads = tf.gradients(outputs, inputs + variables, output_grads)
    grad_inputs = grads[:len(inputs)]
    grad_vars = grads[len(inputs):]
    # TODO(rsepassi): Make fn_with_custom_grad work with bfloat16.
    # If the input gradients are bfloat16, it's assumed the variables are
    # bfloat16. This is a hack to ensure that grad_vars are the right type.
    if grad_inputs[0].dtype == tf.bfloat16:
      grad_vars = [tf.cast(grad_var, tf.bfloat16) for grad_var in grad_vars]
    return grad_inputs, grad_vars
  @fn_with_custom_grad(grad_fn)
  def fn_with_recompute(*args):
    # Record the scopes in effect at forward time for reuse in grad_fn.
    cached_vs.append(tf.compat.v1.get_variable_scope())
    cached_arg_scope.append(tf.contrib.framework.current_arg_scope())
    return fn(*args)
  return fn_with_recompute(*args)
def dense(x, units, **kwargs):
  """Identical to layers.dense.

  Additionally, if a `layer_collection` kwarg is supplied, the layer's
  parameters and activations are registered with it (for KFAC-style
  second-order optimizers).
  """
  layer_collection = kwargs.pop("layer_collection", None)
  activations = layers().Dense(units, **kwargs)(x)
  if layer_collection:
    # We need to find the layer parameters using scope name for the layer, so
    # check that the layer is named. Otherwise parameters for different layers
    # may get mixed up.
    layer_name = tf.compat.v1.get_variable_scope().name
    if (not layer_name) or ("name" not in kwargs):
      raise ValueError(
          "Variable scope and layer name cannot be empty. Actual: "
          "variable_scope={}, layer name={}".format(
              layer_name, kwargs.get("name", None)))
    layer_name += "/" + kwargs["name"]
    layer_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                     scope=layer_name)
    assert layer_params
    if len(layer_params) == 1:
      layer_params = layer_params[0]
    tf.logging.info(
        "Registering dense layer to collection for tensor: {}".format(
            layer_params))
    x_shape = x.shape.as_list()
    if len(x_shape) == 3:
      # Handle [batch, time, depth] inputs by folding batch and time into
      # one dimension: reshaping inputs to [batchxtime, depth].
      x_2d = tf.reshape(x, [-1, x_shape[2]])
      activations_shape = activations.shape.as_list()
      activations_2d = tf.reshape(activations, [-1, activations_shape[2]])
      layer_collection.register_fully_connected_multi(
          layer_params, x_2d, activations_2d, num_uses=x_shape[1])
      activations = tf.reshape(activations_2d, activations_shape)
    else:
      layer_collection.register_fully_connected(layer_params, x, activations)
  return activations
def batch_dense(inputs,
                units,
                activation=None,
                kernel_initializer=None,
                reuse=None,
                name=None):
  """Multiply a batch of input matrices by a batch of parameter matrices.

  Each input matrix is multiplied by the corresponding parameter matrix.
  This is useful in a mixture-of-experts where the batch represents different
  experts with different inputs.

  Args:
    inputs: a Tensor with shape [batch, length, input_units]
    units: an integer
    activation: an optional activation function to apply to the output
    kernel_initializer: an optional initializer
    reuse: whether to reuse the variable scope
    name: an optional string

  Returns:
    a Tensor with shape [batch, length, units]

  Raises:
    ValueError: if the "batch" or "input_units" dimensions of inputs are not
      statically known.
  """
  inputs_shape = shape_list(inputs)
  if len(inputs_shape) != 3:
    raise ValueError("inputs must have 3 dimensions")
  batch, _, input_units = inputs_shape
  # The kernel is created per batch entry, so both sizes must be static.
  if not isinstance(batch, int) or not isinstance(input_units, int):
    raise ValueError("inputs must have static dimensions 0 and 2")
  with tf.variable_scope(
      name,
      default_name="batch_dense",
      values=[inputs],
      reuse=reuse,
      dtype=inputs.dtype):
    if kernel_initializer is None:
      kernel_initializer = tf.random_normal_initializer(
          stddev=input_units**-0.5)
    kernel = tf.get_variable(
        "w", [batch, input_units, units],
        initializer=kernel_initializer,
        dtype=inputs.dtype)
    # Batched matmul: one [input_units, units] matrix per batch entry.
    outputs = tf.matmul(inputs, kernel)
    return outputs if activation is None else activation(outputs)
def mix(x1,
        x2,
        steps,
        is_training,
        min_prob=0.0,
        max_prob=1.0,
        mode="lin",
        simple=False,
        broadcast_last=False):
  """Mix starting with x2, mixing mixing, going towards x1."""
  with tf.name_scope("mix"):
    if not is_training:
      # Eval: mix with the final (max) probability, no schedule.
      if max_prob >= 1.0:
        return x1
      alpha_shape = shape_list(x1)
      if broadcast_last:
        # One mixing decision per position, shared across the last axis.
        alpha_shape = alpha_shape[:-1] + [1]
      alpha = tf.random_uniform(alpha_shape)
      alpha = to_float(tf.less(alpha, max_prob))
      return alpha * x1 + (1.0 - alpha) * x2
    def get_res():
      """Create the result.
      Separate function to speed it up later (see below).
      Returns:
        Tensor of mixed inputs.
      """
      # Probability of taking x1 ramps from min_prob to max_prob over `steps`.
      if mode == "lin":
        alpha_p = inverse_lin_decay(steps)
      else:
        alpha_p = inverse_exp_decay(steps)
      alpha_p = alpha_p * (max_prob - min_prob) + min_prob
      if simple:
        # Deterministic convex combination rather than Bernoulli sampling.
        return alpha_p * x1 + (1.0 - alpha_p) * x2
      alpha_shape = shape_list(x1)
      if broadcast_last:
        alpha_shape = alpha_shape[:-1] + [1]
      alpha = tf.random_uniform(alpha_shape)
      alpha = to_float(tf.less(alpha, alpha_p))
      return alpha * x1 + (1.0 - alpha) * x2
    if max_prob < 1.0:
      return get_res()
    # Prevent sampling after steps is passed to speed it up.
    if is_xla_compiled():
      return get_res()
    else:
      cur_step = tf.train.get_global_step()
      if cur_step is None:
        return x1  # Step not available, probably eval mode, don't mix.
      # After `steps`, the schedule would return x1 anyway; skip the sampling.
      return tf.cond(tf.less(cur_step, steps), get_res, lambda: x1)
def brelu(x):
  """Bipolar ReLU as in https://arxiv.org/abs/1709.04054."""
  original_shape = shape_list(x)
  # Pair up channels along a new trailing axis and split into two halves.
  pos, neg = tf.split(
      tf.reshape(x, original_shape[:-1] + [-1, 2]), 2, axis=-1)
  # ReLU on one half, mirrored ReLU on the other.
  activated = tf.concat([tf.nn.relu(pos), -tf.nn.relu(-neg)], axis=-1)
  return tf.reshape(activated, original_shape)
def belu(x):
  """Bipolar ELU as in https://arxiv.org/abs/1709.04054."""
  original_shape = shape_list(x)
  # Pair up channels along a new trailing axis and split into two halves.
  pos, neg = tf.split(
      tf.reshape(x, original_shape[:-1] + [-1, 2]), 2, axis=-1)
  # ELU on one half, mirrored ELU on the other.
  activated = tf.concat([tf.nn.elu(pos), -tf.nn.elu(-neg)], axis=-1)
  return tf.reshape(activated, original_shape)
def gelu(x):
  """Gaussian Error Linear Unit (tanh approximation).

  This is a smoother version of the RELU.
  Original paper: https://arxiv.org/abs/1606.08415

  Args:
    x: float Tensor to perform activation.

  Returns:
    x with the GELU activation applied.
  """
  inner = np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))
  cdf = 0.5 * (1.0 + tf.tanh(inner))
  return x * cdf
def nac(x, depth, name=None, reuse=None):
  """NAC (neural accumulator) as in https://arxiv.org/abs/1808.00508."""
  with tf.variable_scope(name, default_name="nac", values=[x], reuse=reuse):
    shape = shape_list(x)
    w_hat = tf.get_variable("w", [shape[-1], depth])
    m_hat = tf.get_variable("m", [shape[-1], depth])
    # The tanh * sigmoid product biases effective weights towards {-1, 0, 1}.
    weights = tf.tanh(w_hat) * tf.nn.sigmoid(m_hat)
    flat = tf.reshape(x, [-1, shape[-1]])
    return tf.reshape(tf.matmul(flat, weights), shape[:-1] + [depth])
def nalu(x, depth, epsilon=1e-30, name=None, reuse=None):
  """NALU (neural arithmetic logic unit) as in https://arxiv.org/abs/1808.00508."""
  with tf.variable_scope(name, default_name="nalu", values=[x], reuse=reuse):
    shape = shape_list(x)
    flat = tf.reshape(x, [-1, shape[-1]])
    # Gate interpolating between the additive and multiplicative paths.
    gate_w = tf.get_variable("w", [shape[-1], depth])
    gate = tf.nn.sigmoid(tf.matmul(flat, gate_w))
    gate = tf.reshape(gate, shape[:-1] + [depth])
    additive = nac(x, depth, name="nac_lin")
    # Multiplicative path: NAC in log space, exponentiated back.
    log_x = tf.log(tf.abs(x) + epsilon)
    log_result = nac(log_x, depth, name="nac_log")
    return gate * additive + (1 - gate) * tf.exp(log_result)
def argmax_with_score(logits, axis=None):
  """Argmax along with the value at the argmax position."""
  axis = axis or len(logits.get_shape()) - 1
  predictions = tf.argmax(logits, axis=axis)
  logits_shape = shape_list(logits)
  prefix_shape, vocab_size = logits_shape[:-1], logits_shape[-1]
  # Total number of rows after flattening all leading dims.
  prefix_size = 1
  for dim in prefix_shape:
    prefix_size *= dim
  # Flatten, then gather the chosen logit from each row.
  flat_logits = tf.reshape(logits, [prefix_size, vocab_size])
  flat_preds = tf.reshape(predictions, [prefix_size])
  gather_idx = tf.stack(
      [tf.range(tf.to_int64(prefix_size)),
       tf.to_int64(flat_preds)],
      axis=1)
  flat_scores = tf.gather_nd(flat_logits, gather_idx)
  # Unflatten the scores back to the prefix shape.
  scores = tf.reshape(flat_scores, prefix_shape)
  return predictions, scores
def log_prob_from_logits(logits, reduce_axis=-1):
  """Normalize logits into log-probabilities along `reduce_axis`."""
  log_partition = tf.reduce_logsumexp(logits, axis=reduce_axis, keepdims=True)
  return logits - log_partition
def top_kth_iterative(x, k):
  """Compute the k-th top element of x on the last axis iteratively.
  This assumes values in x are non-negative, rescale if needed.
  It is often faster than tf.nn.top_k for small k, especially if k < 30.
  Note: this does not support back-propagation, it stops gradients!
  Args:
    x: a Tensor of non-negative numbers of type float.
    k: a python integer.
  Returns:
    a float tensor of the same shape as x but with 1 on the last axis
    that contains the k-th largest number in x.
  """
  # The iterative computation is as follows:
  #
  # cur_x = x
  # for _ in range(k):
  #   top_x = maximum of elements of cur_x on the last axis
  #   cur_x = cur_x where cur_x < top_x and 0 everywhere else (top elements)
  #
  # We encode this computation in a TF graph using tf.foldl, so the inner
  # part of the above loop is called "next_x" and tf.foldl does the loop.
  def next_x(cur_x, _):
    # Zero out the current maxima; the non-negativity assumption makes 0 a
    # safe "removed" value.
    top_x = tf.reduce_max(cur_x, axis=-1, keep_dims=True)
    return cur_x * to_float(cur_x < top_x)
  # We only do k-1 steps of the loop and compute the final max separately.
  fin_x = tf.foldl(next_x, tf.range(k - 1), initializer=tf.stop_gradient(x),
                   parallel_iterations=2, back_prop=False)
  return tf.stop_gradient(tf.reduce_max(fin_x, axis=-1, keep_dims=True))
def top_1_tpu(inputs):
  """find max and argmax over the last dimension.
  Works well on TPU
  Args:
    inputs: A tensor with shape [..., depth]
  Returns:
    values: a Tensor with shape [...]
    indices: a Tensor with shape [...]
  """
  inputs_max = tf.reduce_max(inputs, axis=-1, keepdims=True)
  # 0/1 mask marking positions equal to the max.
  mask = tf.to_int32(tf.equal(inputs_max, inputs))
  # Multiplying positions by the mask and reducing with max recovers the
  # (largest) index of the maximum without tf.argmax, which is TPU-friendly.
  index = tf.range(tf.shape(inputs)[-1]) * mask
  return tf.squeeze(inputs_max, -1), tf.reduce_max(index, axis=-1)
def index_last_dim_with_indices(x, indices):
  """Use indices to index into the last axis of x.
  This can be useful for recovering the actual probabilities of a sample from a
  probability distribution.
  Args:
    x: Tensor, n-d.
    indices: Tensor, (n-1)-d, where the dimension sizes match the first (n-1)
      dimensions of x. The values of indices will be used to index into the last
      axis of x.
  Returns:
    Tensor, (n-1)-d.
  """
  assert len(x.shape) == len(indices.shape) + 1
  x_shape = shape_list(x)
  vocab_size = x_shape[-1]
  # Flatten all leading dims so each row of flat_x is indexed by one entry
  # of flat_indices.
  flat_x = tf.reshape(x, [list_product(x_shape[:-1]), vocab_size])
  flat_indices = tf.reshape(indices, [list_product(x_shape[:-1])])
  # Build [row, column] coordinate pairs for gather_nd.
  idx = tf.stack(
      [
          tf.range(tf.to_int64(shape_list(flat_indices)[0])),
          tf.to_int64(flat_indices)
      ],
      axis=1)
  flat_x_idx = tf.gather_nd(flat_x, idx)
  x_idx = tf.reshape(flat_x_idx, x_shape[:-1])
  return x_idx
def should_generate_summaries():
  """Is this an appropriate context to generate summaries.
  Returns:
    a boolean
  """
  scope_name = tf.contrib.framework.get_name_scope()
  # Summaries don't work well within tf.while_loop().
  inside_while_loop = bool(scope_name) and "while/" in scope_name
  # A reused variable scope indicates replicated data shards; avoid
  # generating a separate summary per shard.
  reusing_variables = tf.compat.v1.get_variable_scope().reuse
  return not (inside_while_loop or reusing_variables)
def reshape_like(a, b):
  """Reshapes a to match the shape of b in all but the last dimension."""
  # Dynamic target shape: b's leading dims, a's final dim.
  target_shape = tf.concat([tf.shape(b)[:-1], tf.shape(a)[-1:]], 0)
  out = tf.reshape(a, target_shape)
  if not tf.executing_eagerly():
    # Propagate static shape information where available.
    static_shape = (
        b.get_shape().as_list()[:-1] + a.get_shape().as_list()[-1:])
    out.set_shape(static_shape)
  return out
def summarize_video(video, prefix, max_outputs=1):
  """Summarize the video using image summaries starting with prefix.

  Args:
    video: 5-D Tensor [batch, time, height, width, channels].
    prefix: string prefix for the generated summary names.
    max_outputs: max number of batch elements per image summary.

  Raises:
    ValueError: if `video` is not 5-dimensional.
  """
  video_shape = shape_list(video)
  if len(video_shape) != 5:
    raise ValueError("Assuming videos given as tensors in the format "
                     "[batch, time, height, width, channels] but got one "
                     "of shape: %s" % str(video_shape))
  if tf.executing_eagerly():
    # Summaries below use the graph-mode tf.summary API; skip in eager mode.
    return
  if video.get_shape().as_list()[1] is None:
    # Unknown (dynamic) number of frames: only summarize the last frame.
    tf.summary.image(
        "%s_last_frame" % prefix,
        tf.cast(video[:, -1, :, :, :], tf.uint8),
        max_outputs=max_outputs)
  else:
    # Static frame count: emit one image summary per frame.
    for k in range(video_shape[1]):
      tf.summary.image(
          "%s_frame_%d" % (prefix, k),
          tf.cast(video[:, k, :, :, :], tf.uint8),
          max_outputs=max_outputs)
def cast_like(x, y):
  """Cast x to y's dtype, if necessary.

  Args:
    x: value convertible to a Tensor.
    y: value convertible to a Tensor whose dtype x should match.

  Returns:
    x unchanged if the base dtypes already match, otherwise x cast to
    y's dtype.
  """
  x = tf.convert_to_tensor(x)
  y = tf.convert_to_tensor(y)
  if x.dtype.base_dtype == y.dtype.base_dtype:
    return x
  cast_x = tf.cast(x, y.dtype)
  if cast_x.device != x.device:
    # A cast that lands on a different device implies a copy; warn so the
    # performance cost is visible.
    x_name = "(eager Tensor)"
    try:
      x_name = x.name
    except AttributeError:
      pass
    tf.logging.warning("Cast for %s may induce copy from '%s' to '%s'", x_name,
                       x.device, cast_x.device)
  return cast_x
def make_even_size(x):
  """Pad x to be even-sized on axis 1 and 2, but only if necessary.

  Args:
    x: a Tensor with at least 3 dimensions; axes 1 and 2 are padded to the
      next even size when odd.

  Returns:
    x, possibly padded on axes 1 and/or 2, with its static shape updated.
  """
  x_shape = x.get_shape().as_list()
  assert len(x_shape) > 2, "Only 3+-dimensional tensors supported."
  # Use -1 for unknown dims so the modulo checks below still work.
  shape = [dim if dim is not None else -1 for dim in x_shape]
  new_shape = x_shape  # To make sure constant shapes remain constant.
  if x_shape[1] is not None:
    new_shape[1] = 2 * int(math.ceil(x_shape[1] * 0.5))
  if x_shape[2] is not None:
    new_shape[2] = 2 * int(math.ceil(x_shape[2] * 0.5))
  # Both even (note: -1 % 2 != 0, so unknown dims are treated as odd).
  if shape[1] % 2 == 0 and shape[2] % 2 == 0:
    return x
  # Axis 1 already even: only axis 2 needs padding.
  if shape[1] % 2 == 0:
    x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)
    x.set_shape(new_shape)
    return x
  # Axis 2 already even: only axis 1 needs padding.
  if shape[2] % 2 == 0:
    x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)
    x.set_shape(new_shape)
    return x
  # Both axes odd: pad each in turn.
  x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)
  x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)
  x.set_shape(new_shape)
  return x
def sliced_gan_loss(input1,
                    input2,
                    discriminator,
                    num_vecs,
                    do_random_vecs=True,
                    do_tanh=True,
                    return_logits=False):
  """Loss inspired by the sliced WGAN paper: https://arxiv.org/abs/1804.01947.
  Puts input1 and input2 through the provided discriminator to get logits.
  Then, computes num_vecs random projections of the logits, sorts them on
  the batch dimension and returns the L2 loss between the sorted vectors.
  See the above-mentioned paper for the reasoning behind it.
  Args:
    input1: first discriminator inputs.
    input2: second discriminator inputs.
    discriminator: inputs -> logits function.
    num_vecs: how many random vectors to use for projections.
    do_random_vecs: whether to use random vectors or just tanh of the logits.
    do_tanh: if true (default) we'll also just use tanh of the logits.
    return_logits: Whether or not to return the logits.
  Returns:
    The generator loss, i.e., the sliced approximation of the distance between
    the projected distributions (warning: discriminator should maximize it).
  """
  with tf.variable_scope("sliced_gan"):
    # Both inputs go through the same discriminator weights (scope reuse).
    with tf.variable_scope("discriminator"):
      logits1 = discriminator(input1)
    with tf.variable_scope("discriminator", reuse=True):
      logits2 = discriminator(input2)
    if do_random_vecs:
      # Unit-norm random projection directions, shared by both inputs.
      random_vecs = tf.nn.l2_normalize(
          tf.random_uniform([shape_list(logits1)[-1], num_vecs]), axis=0)
    def get_sorted_projections(x):
      """Make projections of x and sort them on the batch dimension."""
      x = tf.reshape(x, [-1, shape_list(x)[-1]])
      batch_size = shape_list(x)[0]
      if do_random_vecs and do_tanh:
        n = tf.nn.l2_normalize(x, axis=1)
        proj = tf.concat([tf.matmul(n, random_vecs), tf.tanh(n)], axis=1)
      elif do_random_vecs:
        n = tf.nn.l2_normalize(x, axis=1)
        proj = tf.matmul(n, random_vecs)
      else:
        proj = tf.tanh(x)
      proj = tf.transpose(proj, [1, 0])  # [num_vecs, batch] after this.
      # Sorting is implemented as a full top_k over the batch dimension.
      if is_xla_compiled():
        proj_dtype = proj.dtype
        proj = tf.cast(proj, tf.bfloat16)
        # Currently TPU only supports 1-D top_k calls.
        map_fn = lambda x: tf.nn.top_k(x, k=batch_size, sorted=True)[0]
        values = tf.map_fn(map_fn, proj)
        values = tf.cast(values, proj_dtype)
      else:
        values, _ = tf.nn.top_k(proj, k=batch_size, sorted=True)
      return values
    proj1 = get_sorted_projections(logits1)
    proj2 = get_sorted_projections(logits2)
    # L2 distance between the two sorted projection sets.
    dist = tf.reduce_mean(tf.squared_difference(proj1, proj2))
    if return_logits:
      return dist, logits1, logits2
    return dist
def lrelu(input_, leak=0.2, name="lrelu"):
  """Leaky ReLU activation: max(x, leak * x)."""
  leaked = input_ * leak
  return tf.maximum(input_, leaked, name=name)
def deep_discriminator(x,
                       batch_norm,
                       is_training,
                       filters=64,
                       filter_size=4,
                       stride=2,
                       output_size=1024):
  """Discriminator architecture based on InfoGAN.

  Args:
    x: input image tensor [batch, height, width, channels].
    batch_norm: bool, whether to apply batch normalization.
    is_training: bool, passed to the batch-norm layers.
    filters: base number of convolutional filters.
    filter_size: convolution kernel size.
    stride: convolution stride (applied twice, so spatial dims shrink 4x).
    output_size: width of the final dense layer.

  Returns:
    Tensor of discriminator features [batch, output_size].
  """
  with tf.variable_scope(
      "discriminator", initializer=tf.random_normal_initializer(stddev=0.02)):
    batch_size, height, width = shape_list(x)[:3]  # pylint: disable=unbalanced-tuple-unpacking
    net = layers().Conv2D(
        filters, filter_size, strides=stride, padding="SAME", name="conv1")(x)
    net = lrelu(net)
    net = layers().Conv2D(
        2 * filters,
        filter_size,
        strides=stride,
        padding="SAME",
        name="conv2")(net)
    # [bs, h/4, w/4, 128]
    if batch_norm:
      net = layers().BatchNormalization(
          training=is_training, momentum=0.999, name="d_bn2")(net)
    net = lrelu(net)
    size = height * width
    x_shape = x.get_shape().as_list()
    if x_shape[1] is None or x_shape[2] is None:
      # Dynamic spatial dims: fall back to global average pooling.
      net = tf.reduce_mean(net, axis=[1, 2])
    else:
      # size * 8 == (h/4) * (w/4) * 128 when filters=64, i.e. the flattened
      # feature count after two stride-2 convs with 2*filters channels.
      net = tf.reshape(net, [batch_size, size * 8])
    net = layers().Dense(output_size, name="d_fc3")(net)
    if batch_norm:
      net = layers().BatchNormalization(
          training=is_training, momentum=0.999, name="d_bn3")(net)
    net = lrelu(net)
    return net
def instance_norm(x):
  """Instance normalization layer.

  Normalizes each sample and channel over the spatial axes (1, 2) and
  applies a learned per-channel scale and offset.
  """
  with tf.variable_scope("instance_norm"):
    epsilon = 1e-5
    # Per-sample, per-channel statistics over height and width.
    mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)
    scale = tf.get_variable(
        "scale", [x.get_shape()[-1]],
        initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02))
    offset = tf.get_variable(
        "offset", [x.get_shape()[-1]], initializer=tf.constant_initializer(0.0))
    out = scale * tf.div(x - mean, tf.sqrt(var + epsilon)) + offset
    return out
def general_conv(x,
                 num_filters=64,
                 filter_size=7,
                 stride=1,
                 stddev=0.02,
                 padding="VALID",
                 name="conv",
                 do_norm="instance",
                 do_relu=True,
                 relufactor=0):
  """Generalized convolution layer.

  Conv2D followed by optional normalization ("layer" or "instance"; any
  other value, including False, skips normalization) and an optional
  (leaky) ReLU controlled by `relufactor`.
  """
  with tf.variable_scope(name):
    x = layers().Conv2D(
        num_filters,
        filter_size,
        stride,
        padding,
        activation=None,
        kernel_initializer=tf.truncated_normal_initializer(stddev=stddev),
        bias_initializer=tf.constant_initializer(0.0))(x)
    if do_norm == "layer":
      x = layer_norm(x)
    elif do_norm == "instance":
      x = instance_norm(x)
    if do_relu:
      if relufactor == 0:
        x = tf.nn.relu(x, "relu")
      else:
        # Non-zero relufactor selects a leaky ReLU with that slope.
        x = lrelu(x, leak=relufactor)
    return x
def patch_discriminator(x, filters=64, filter_size=5, n=4,
                        name="patch_discrim"):
  """Patch descriminator.

  Takes a random quarter-size spatial crop of `x`, applies `n` conv layers
  with doubling filter counts, and averages spatially.
  """
  with tf.variable_scope(name):
    x_shape = shape_list(x)
    spatial_dims = [x_shape[1] // 4, x_shape[2] // 4]
    # Random crop so only a patch of the image is discriminated.
    x = tf.random_crop(x, [x_shape[0]] + spatial_dims + [x_shape[3]])
    for i in range(n):
      x = general_conv(
          x=x,
          num_filters=filters * 2**i,
          filter_size=filter_size,
          stride=2 if i != n - 1 else 1,  # last layer keeps resolution
          stddev=0.02,
          padding="SAME",
          name="c%d" % i,
          do_norm="instance" if i != 0 else False,  # no norm on first layer
          do_relu=i != n - 1,  # no activation on last layer
          relufactor=0.2)
    x = tf.reduce_mean(x, [1, 2])
    return x
def mean_with_attention(x, name, num_heads=4):
  """Mean and attention to reduce spatial dimensions.

  Combines a plain spatial mean with `num_heads` attention-weighted means,
  then mixes them through a dense layer.

  Args:
    x: Tensor [batch, height, width, channels].
    name: variable scope name.
    num_heads: number of attention heads.

  Returns:
    Tensor [batch, 2 * channels].
  """
  with tf.variable_scope(name):
    shape = shape_list(x)
    m = tf.reduce_mean(x, [1, 2])
    # Per-position attention logits, one set per head.
    a = layers().Dense(num_heads, name="mean_attn")(x)
    s = tf.reshape(a, [shape[0], -1, num_heads])
    # Softmax over all spatial positions.
    s = tf.nn.softmax(s, axis=1)
    s = tf.reshape(s, shape[:-1] + [1, num_heads])
    # Attention-weighted spatial means, one per head.
    am = tf.reduce_mean(tf.expand_dims(x, axis=-1) * s, [1, 2])
    l = tf.concat([am, tf.expand_dims(m, axis=-1)], axis=-1)
    return layers().Dense(2 * shape[-1], name="mean_attn_final")(
        tf.reshape(l, [shape[0], (num_heads+1) * shape[-1]]))
def single_discriminator(x, filters=128, kernel_size=8,
                         strides=4, pure_mean=False):
  """A simple single-layer convolutional discriminator."""
  with tf.variable_scope("discriminator"):
    conv_out = layers().Conv2D(
        filters, kernel_size, strides=strides, padding="SAME", name="conv1")(x)
    if pure_mean:
      # Plain spatial average pooling.
      return tf.reduce_mean(conv_out, [1, 2])
    # Attention-weighted spatial reduction.
    return mean_with_attention(conv_out, "mean_with_attention")
def double_discriminator(x, filters1=128, filters2=None,
                         kernel_size=8, strides=4, pure_mean=False):
  """A convolutional discriminator with 2 layers and concatenated output.

  Args:
    x: input image tensor [batch, height, width, channels].
    filters1: filters in the first convolution.
    filters2: filters in the second convolution; defaults to 4 * filters1.
    kernel_size: kernel size for both convolutions.
    strides: stride for both convolutions.
    pure_mean: if True, reduce spatially with a plain mean instead of
      mean_with_attention.

  Returns:
    Tensor with both layers' reduced features concatenated on the last axis.
  """
  if filters2 is None:
    filters2 = 4 * filters1
  with tf.variable_scope("discriminator"):
    net = layers().Conv2D(
        filters1, kernel_size, strides=strides, padding="SAME", name="conv1")(x)
    if pure_mean:
      net1 = tf.reduce_mean(net, [1, 2])
    else:
      net1 = mean_with_attention(net, "mean_with_attention1")
    # Fix: removed a stray `tf.reshape(net, [batch_size, -1])` whose result
    # was discarded; it contributed nothing to the graph output.
    net = tf.nn.relu(net)
    net = layers().Conv2D(
        filters2, kernel_size, strides=strides, padding="SAME",
        name="conv2")(net)
    if pure_mean:
      net2 = tf.reduce_mean(net, [1, 2])
    else:
      net2 = mean_with_attention(net, "mean_with_attention2")
    return tf.concat([net1, net2], axis=-1)
def upscale(inputs, f, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR):
  """Upscaling the image by a factor of f."""
  shape = shape_list(inputs)
  new_size = (shape[1] * f, shape[2] * f)
  return tf.image.resize_images(inputs, new_size, method)
def tpu_safe_image_summary(image):
  """Prepare an image tensor for summaries in a TPU-compatible way."""
  if not is_xla_compiled():
    return tf.cast(image, tf.uint8)
  # We only support float32 images at the moment due to casting complications.
  if image.dtype != tf.float32:
    image = to_float(image)
  return image
# This has been (shamefully) copied from
# GitHub tensorflow/models/blob/master/research/slim/nets/cyclegan.py
#
# tensorflow/models cannot be pip installed, and even if it were we don't want
# to depend on all the models in it.
#
# Therefore copying and forgoing any more bugfixes into it is the most
# expedient way to use this function.
def cyclegan_upsample(net, num_outputs, stride, method="conv2d_transpose"):
  """Upsamples the given inputs.
  Args:
    net: A Tensor of size [batch_size, height, width, filters].
    num_outputs: The number of output filters.
    stride: A list of 2 scalars or a 1x2 Tensor indicating the scale,
      relative to the inputs, of the output dimensions. For example, if kernel
      size is [2, 3], then the output height and width will be twice and three
      times the input size.
    method: The upsampling method: 'nn_upsample_conv',
      'bilinear_upsample_conv', or 'conv2d_transpose'.
  Returns:
    A Tensor which was upsampled using the specified method.
  Raises:
    ValueError: if `method` is not recognized.
  """
  with tf.variable_scope("upconv"):
    net_shape = tf.shape(net)
    height = net_shape[1]
    width = net_shape[2]
    # Reflection pad by 1 in spatial dimensions (axes 1, 2 = h, w) to make a
    # 3x3 "valid" convolution produce an output with the same dimension as the
    # input.
    spatial_pad_1 = np.array([[0, 0], [1, 1], [1, 1], [0, 0]])
    if method == "nn_upsample_conv":
      # Nearest-neighbor resize followed by a same-size 3x3 conv.
      net = tf.image.resize_nearest_neighbor(
          net, [stride[0] * height, stride[1] * width])
      net = tf.pad(net, spatial_pad_1, "REFLECT")
      net = layers().Conv2D(
          num_outputs, (3, 3), activation=tf.nn.relu)(net)
    elif method == "bilinear_upsample_conv":
      # Bilinear resize followed by a same-size 3x3 conv.
      net = tf.image.resize_bilinear(net,
                                     [stride[0] * height, stride[1] * width])
      net = tf.pad(net, spatial_pad_1, "REFLECT")
      net = layers().Conv2D(
          num_outputs, (3, 3), activation=tf.nn.relu)(net)
    elif method == "conv2d_transpose":
      # This corrects 1 pixel offset for images with even width and height.
      # conv2d is left aligned and conv2d_transpose is right aligned for even
      # sized images (while doing "SAME" padding).
      # Note: This doesn"t reflect actual model in paper.
      net = layers().Conv2DTranspose(
          num_outputs, (3, 3), strides=stride, activation=tf.nn.relu)(net)
      net = net[:, 1:, 1:, :]
    else:
      raise ValueError("Unknown method: [%s]" % method)
    return net
def weight_targeting(w, k):
  """Weight-level magnitude pruning.

  Builds a boolean-style (float 0/1) mask over `w` that marks, per output
  unit, the weights whose magnitude is at or below the k-th smallest —
  i.e. the candidates to drop.

  Args:
    w: weight Tensor; the last dimension indexes output units.
    k: scalar, number of smallest-magnitude weights to target per unit.

  Returns:
    Float mask with the same shape as `w`; 1.0 marks targeted weights.
  """
  k = tf.to_int32(k)
  w_shape = shape_list(w)
  size = tf.to_int32(tf.reduce_prod(w_shape[:-1]))
  w = tf.reshape(w, [size, w_shape[-1]])
  transpose_w = tf.transpose(w)
  # Per output unit, the k-th smallest absolute weight is the threshold.
  thres = tf.contrib.framework.sort(tf.abs(transpose_w), axis=1)[:, k]
  mask = to_float(thres[None, :] >= tf.abs(w))
  return tf.reshape(mask, w_shape)
def unit_targeting(w, k):
  """Unit-level magnitude pruning.

  Builds a float 0/1 mask that marks whole output units (columns of the
  flattened weight matrix) whose L2 norm is at or below the k-th smallest.

  Args:
    w: weight Tensor; the last dimension indexes output units.
    k: scalar, selects the norm threshold (k-th smallest unit norm).

  Returns:
    Float mask with the same shape as `w`; entire columns are 1.0 when the
    corresponding unit is targeted.
  """
  k = tf.to_int32(k)
  w_shape = shape_list(w)
  size = tf.to_int32(tf.reduce_prod(w_shape[:-1]))
  w = tf.reshape(w, [size, w_shape[-1]])
  # Column-wise L2 norm = per-unit magnitude.
  norm = tf.norm(w, axis=0)
  thres = tf.contrib.framework.sort(norm, axis=0)[k]
  mask = to_float(thres >= norm)[None, :]
  # Broadcast the per-unit decision over all incoming weights.
  mask = tf.tile(mask, [size, 1])
  return tf.reshape(mask, w_shape)
def td_conv(inputs,
            filters,
            kernel_size,
            targeting_count,
            targeting_fn,
            keep_prob,
            is_training,
            do_prune=True,
            strides=(1, 1),
            padding="valid",
            data_format="channels_last",
            dilation_rate=(1, 1),
            activation=None,
            use_bias=True,
            kernel_initializer=None,
            bias_initializer=tf.zeros_initializer(),
            name=None,
            reuse=None):
  """Apply targeted dropout to the weights of a convolution.

  Builds the convolution kernel manually so that `targeted_dropout` can be
  applied to the weights before `tf.nn.conv2d` is called. See
  `targeted_dropout` for the meaning of `targeting_count`, `targeting_fn`,
  `keep_prob` and `do_prune`.
  """
  with tf.variable_scope(name, default_name="td_conv", reuse=reuse):
    nhwc = data_format == "channels_last"
    in_dim = shape_list(inputs)[-1] if nhwc else shape_list(inputs)[1]
    kernel_shape = [kernel_size, kernel_size, in_dim, filters]
    w = tf.get_variable(
        "DW", shape=kernel_shape, initializer=kernel_initializer)
    if use_bias:
      b = tf.get_variable("b", shape=[filters], initializer=bias_initializer)
    # keep_prob == 1.0 means no dropout at all, so skip the masking entirely.
    if keep_prob < 1.0:
      w = targeted_dropout(
          w,
          targeting_count,
          keep_prob,
          targeting_fn,
          is_training,
          do_prune=do_prune)
    # Normalize scalar strides/dilations, then expand to the 4-element form
    # tf.nn.conv2d expects for the chosen data format.
    if isinstance(strides, int):
      strides = [strides, strides]
    if isinstance(dilation_rate, int):
      dilation_rate = [dilation_rate, dilation_rate]
    if nhwc:
      strides = [1, strides[0], strides[1], 1]
      dilation_rate = [1, dilation_rate[0], dilation_rate[1], 1]
    else:
      strides = [1, 1, strides[0], strides[1]]
      dilation_rate = [1, 1, dilation_rate[0], dilation_rate[1]]
    y = tf.nn.conv2d(
        inputs,
        w,
        strides,
        padding,
        data_format="NHWC" if nhwc else "NCHW",
        dilations=dilation_rate,
        name=None)
    if use_bias:
      y += b
    if activation:
      y = activation(y)
    return y
def targeted_dropout(inputs,
                     k,
                     keep_prob,
                     targeting_fn,
                     is_training,
                     do_prune=False):
  """Applies targeted dropout.
  Applies dropout at a rate of `1 - keep_prob` to only those elements of
  `inputs` marked by `targeting_fn`. See below and paper for more detail:
  "Targeted Dropout for Posthoc Pruning" Aidan N. Gomez, Ivan Zhang,
  Kevin Swersky, Yarin Gal, and Geoffrey E. Hinton.
  Args:
    inputs: Tensor, inputs to apply targeted dropout to.
    k: Scalar Tensor or python scalar, sets the number of elements to target in
      `inputs`. Must be within `[0, tf.shape(x)[-1]]` and compatible with
      second argument of `targeting_fn`.
    keep_prob: Scalar Tensor, passed as `tf.nn.dropout`'s `keep_prob` argument.
    targeting_fn: callable `fn(inputs, k) -> Boolean Tensor`, produces a
      boolean mask the same shape as `inputs` where True indicates an element
      will be dropped, and False not.
    is_training: bool, indicates whether currently training.
    do_prune: bool, indicates whether to prune the `k * (1 - keep_prob)`
      elements of `inputs` expected to be dropped each forwards pass.
  Returns:
    Tensor, same shape and dtype as `inputs`.
  """
  if not is_training and do_prune:
    # At inference time, prune only the expected number of dropped elements.
    k = tf.round(to_float(k) * to_float(1. - keep_prob))
  mask = targeting_fn(inputs, k)
  mask = tf.cast(mask, inputs.dtype)
  if is_training:
    # Untargeted elements pass through; targeted elements get dropout.
    return inputs * (1 - mask) + tf.nn.dropout(inputs, keep_prob) * mask
  elif do_prune:
    # Hard-prune the targeted elements.
    return inputs * (1 - mask)
  else:
    return inputs
def kl_divergence(mu, log_var, mu_p=0.0, log_var_p=0.0):
  """KL divergence of diagonal gaussian N(mu,exp(log_var)) and N(0,1).
  Args:
    mu: mu parameter of the distribution.
    log_var: log(var) parameter of the distribution.
    mu_p: optional mu from a learned prior distribution
    log_var_p: optional log(var) from a learned prior distribution
  Returns:
    the KL loss.
  """
  batch_size = shape_list(mu)[0]
  # Defaults (0, 0) give a standard-normal prior N(0, 1).
  prior_distribution = tfp.distributions.Normal(
      mu_p, tf.exp(tf.multiply(0.5, log_var_p)))
  posterior_distribution = tfp.distributions.Normal(
      mu, tf.exp(tf.multiply(0.5, log_var)))
  kld = tfp.distributions.kl_divergence(posterior_distribution,
                                        prior_distribution)
  # Sum over all dimensions, averaged over the batch.
  return tf.reduce_sum(kld) / to_float(batch_size)
def sparse_equals_constant(constant, tensor):
  """Element-wise equality of a SparseTensor's stored values and a constant."""
  equal_values = tf.equal(tensor.values, constant)
  return tf.SparseTensor(
      indices=tensor.indices,
      values=equal_values,
      dense_shape=tensor.dense_shape)
def sparse_expand_dims(tensor, current_num_dims, axis=0):
  """Inserts a size-1 dimension into a SparseTensor at `axis`.

  Args:
    tensor: a tf.SparseTensor.
    current_num_dims: int, the current rank of `tensor` (needed to unstack
      its indices and dense_shape statically).
    axis: position for the new dimension; -1 appends it at the end.

  Returns:
    A tf.SparseTensor with rank current_num_dims + 1.
  """
  if axis == -1:
    axis = current_num_dims
  # New coordinate for every non-zero entry is 0 along the inserted axis.
  new_col = tf.zeros([tf.shape(tensor.indices)[0]], dtype=tf.int64)
  cols = tf.unstack(tensor.indices, axis=1, num=current_num_dims)
  shape = tf.unstack(tensor.dense_shape, num=current_num_dims)
  new_indices = tf.stack(cols[:axis] + [new_col] + cols[axis:], axis=1)
  return tf.SparseTensor(
      indices=new_indices,
      values=tensor.values,
      dense_shape=tf.stack(shape[:axis] + [1] + shape[axis:]))
def sparse_add_constant(constant, tensor):
  """Adds a constant to every stored (explicit) value of a SparseTensor."""
  shifted_values = tensor.values + constant
  return tf.SparseTensor(
      indices=tensor.indices,
      values=shifted_values,
      dense_shape=tensor.dense_shape)
def sparse_eye(size):
  """Identity matrix as a tf.SparseTensor of dense shape [size, size].

  Bug fix: `tf.SparseTensor` requires `indices` of shape [nnz, ndims],
  i.e. one [row, col] pair per non-zero entry. The previous code stacked
  the two range vectors on axis 0, producing shape [2, size] instead of
  [size, 2], which is only valid when size == 2.
  """
  diag = tf.range(size)
  # axis=1 makes each row of `indices` an [i, i] coordinate pair.
  indices = tf.cast(tf.stack([diag, diag], axis=1), tf.int64)
  values = tf.ones(size)
  dense_shape = [tf.cast(size, tf.int64), tf.cast(size, tf.int64)]
  return tf.SparseTensor(
      indices=indices, values=values, dense_shape=dense_shape)
# modification from https://github.com/tensorflow/tensorflow/pull/21276
# without special initialization for g
class WeightNorm(tf.keras.layers.Wrapper):
  """Decouple weight magnitude and direction.
  This wrapper reparameterizes a layer by decoupling the weight's
  magnitude and direction. This speeds up convergence by improving the
  conditioning of the optimization problem.
  Weight Normalization: A Simple Reparameterization to Accelerate
  Training of Deep Neural Networks: https://arxiv.org/abs/1602.07868
  Tim Salimans, Diederik P. Kingma (2016)
  WeightNorm wrapper works for keras and tf layers.
  ```python
    net = WeightNorm(tf.keras.layers.Conv2D(2, 2, activation='relu'),
           input_shape=(32, 32, 3), data_init=True)(x)
    net = WeightNorm(tf.keras.layers.Conv2D(16, 5, activation='relu'),
                     data_init=True)
    net = WeightNorm(tf.keras.layers.Dense(120, activation='relu'),
                     data_init=True)(net)
    net = WeightNorm(tf.keras.layers.Dense(n_classes),
                     data_init=True)(net)
  ```
  Arguments:
    layer: a layer instance.
    data_init: If `True` use data dependent variable initialization
  Raises:
    ValueError: If not initialized with a `Layer` instance.
    ValueError: If `Layer` does not contain a `kernel` of weights
    NotImplementedError: If `data_init` is True and running graph execution
  """
  def __init__(self, layer, data_init=False, **kwargs):
    if not isinstance(layer, tf.keras.layers.Layer):
      raise ValueError(
          "Please initialize `WeightNorm` layer with a "
          "`Layer` instance. You passed: {input}".format(input=layer))
    super(WeightNorm, self).__init__(layer, **kwargs)
    # Register the wrapped layer so its variables are tracked/checkpointed.
    self._track_trackable(layer, name="layer")
  def _compute_weights(self):
    """Generate weights with normalization."""
    # kernel = g * v / ||v||, the weight-norm reparameterization.
    with tf.variable_scope("compute_weights"):
      self.layer.kernel = tf.nn.l2_normalize(
          self.layer.v, axis=self.norm_axes) * self.layer.g
  def _init_norm(self, weights):
    """Set the norm of the weight vector."""
    with tf.variable_scope("init_norm"):
      flat = tf.reshape(weights, [-1, self.layer_depth])
      return tf.reshape(tf.norm(flat, axis=0), (self.layer_depth,))
  def _data_dep_init(self, inputs):
    """Data dependent initialization for eager execution."""
    with tf.variable_scope("data_dep_init"):
      # Generate data dependent init values
      # Temporarily disable the activation so raw pre-activations are used
      # for the statistics.
      activation = self.layer.activation
      self.layer.activation = None
      x_init = self.layer.call(inputs)
      m_init, v_init = tf.moments(x_init, self.norm_axes)
      scale_init = 1. / tf.sqrt(v_init + 1e-10)
    # Assign data dependent init values
    self.layer.g = self.layer.g * scale_init
    self.layer.bias = (-m_init * scale_init)
    self.layer.activation = activation
    self.initialized = True
  def build(self, input_shape=None):
    """Build `Layer`."""
    input_shape = tf.TensorShape(input_shape).as_list()
    self.input_spec = layers().InputSpec(shape=input_shape)
    if not self.layer.built:
      self.layer.build(input_shape)
      self.layer.built = False
      if not hasattr(self.layer, "kernel"):
        raise ValueError("`WeightNorm` must wrap a layer that"
                         " contains a `kernel` for weights")
      # The kernel's filter or unit dimension is -1
      self.layer_depth = int(self.layer.kernel.shape[-1])
      self.norm_axes = list(range(self.layer.kernel.shape.ndims - 1))
      # v holds the direction; g holds the per-unit magnitude.
      self.layer.v = self.layer.kernel
      self.layer.g = self.layer.add_variable(
          name="g",
          shape=(self.layer_depth,),
          initializer=tf.ones_initializer,
          dtype=self.layer.kernel.dtype,
          trainable=True)
      # with ops.control_dependencies([self.layer.g.assign(
      #     self._init_norm(self.layer.v))]):
      #   self._compute_weights()
      self._compute_weights()
    self.layer.built = True
    super(WeightNorm, self).build()
    self.built = True
  def call(self, inputs):
    """Call `Layer`."""
    # if context.executing_eagerly():
    #   if not self.initialized:
    #     self._data_dep_init(inputs)
    self._compute_weights()  # Recompute weights for each forward pass
    output = self.layer.call(inputs)
    return output
  def compute_output_shape(self, input_shape):
    return tf.TensorShape(
        self.layer.compute_output_shape(input_shape).as_list())
| 141,390 | 33.168922 | 109 | py |
sequer | sequer-main/code/tensor2tensor/tensor2tensor/layers/common_image_attention_test.py | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for common image attention utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_image_attention
from tensor2tensor.utils import hparam
import tensorflow as tf
class CommonImageAttentionTest(parameterized.TestCase, tf.test.TestCase):
  """Shape and invariance tests for common_image_attention utilities."""
  @parameterized.parameters(
      (common_image_attention.DistributionType.DMOL, 5, 50),
      (common_image_attention.DistributionType.CAT, None, 256),
  )
  def testPostProcessImageTrainMode(self, likelihood, num_mixtures, depth):
    # In TRAIN mode postprocess_image should keep the spatial layout and
    # expand the channel dim to the likelihood's output depth.
    batch = 1
    rows = 8
    cols = 24
    hparams = hparam.HParams(
        hidden_size=2,
        likelihood=likelihood,
        mode=tf.estimator.ModeKeys.TRAIN,
        num_mixtures=num_mixtures,
    )
    inputs = tf.random_uniform([batch, rows, cols, hparams.hidden_size],
                               minval=-1., maxval=1.)
    outputs = common_image_attention.postprocess_image(
        inputs, rows, cols, hparams)
    self.assertEqual(outputs.shape, (batch, rows, cols, depth))
  @parameterized.parameters(
      (common_image_attention.DistributionType.DMOL, 5, 50),
      (common_image_attention.DistributionType.CAT, None, 256),
  )
  def testPostProcessImageInferMode(self, likelihood, num_mixtures, depth):
    # In PREDICT mode with block raster scan, output is reshaped into
    # (blocks_rows, blocks_cols, block_length, block_width, depth).
    batch = 1
    rows = 8
    cols = 24
    block_length = 4
    block_width = 2
    hparams = hparam.HParams(
        block_raster_scan=True,
        hidden_size=2,
        likelihood=likelihood,
        mode=tf.estimator.ModeKeys.PREDICT,
        num_mixtures=num_mixtures,
        query_shape=[block_length, block_width],
    )
    inputs = tf.random_uniform([batch, rows, cols, hparams.hidden_size],
                               minval=-1., maxval=1.)
    outputs = common_image_attention.postprocess_image(
        inputs, rows, cols, hparams)
    num_blocks_rows = rows // block_length
    num_blocks_cols = cols // block_width
    self.assertEqual(outputs.shape,
                     (batch, num_blocks_rows, num_blocks_cols,
                      block_length, block_width, depth))
  @parameterized.parameters(
      (common_image_attention.DistributionType.DMOL, 5, 50),
      (common_image_attention.DistributionType.CAT, None, 256),
  )
  def testCreateOutputTrainMode(self, likelihood, num_mixtures, depth):
    # CAT spreads channels along the width axis; DMOL does not, so the
    # expected output rank differs between the two likelihoods.
    batch = 1
    height = 8
    width = 8
    channels = 3
    rows = height
    if likelihood == common_image_attention.DistributionType.CAT:
      cols = channels * width
    else:
      cols = width
    hparams = hparam.HParams(
        hidden_size=2,
        likelihood=likelihood,
        num_channels=channels,
        mode=tf.estimator.ModeKeys.TRAIN,
        num_mixtures=num_mixtures,
    )
    decoder_output = tf.random_normal([batch, rows, cols, hparams.hidden_size])
    targets = tf.random_uniform([batch, height, width, channels],
                                minval=-1., maxval=1.)
    output = common_image_attention.create_output(
        decoder_output, rows, cols, targets, hparams)
    if hparams.likelihood == common_image_attention.DistributionType.CAT:
      self.assertEqual(output.shape, (batch, height, width, channels, depth))
    else:
      self.assertEqual(output.shape, (batch, height, width, depth))
  def testTransformerDecoderLayersGlobal(self):
    # Causality check: with the input shifted right, position 0 sees only
    # padding, so its output must be identical across all batch elements.
    one_hot_data = tf.constant([[[0., 1.], [1., 0.]],
                                [[0., 1.], [1., 0.]],
                                [[1., 0.], [1., 0.]]])
    hparams = common_hparams.basic_params1()
    hparams.hidden_size = 4
    hparams.num_layers = 1
    hparams.layer_prepostprocess_dropout = 0.
    hparams.add_hparam("attention_key_channels", None)
    hparams.add_hparam("attention_value_channels", None)
    hparams.add_hparam("num_heads", 1)
    hparams.add_hparam("attention_dropout", 0.)
    hparams.add_hparam("shared_rel", False)
    hparams.add_hparam("block_width", 1)
    hparams.add_hparam("block_length", 1)
    hparams.add_hparam("q_filter_width", 1)
    hparams.add_hparam("kv_filter_width", 1)
    hparams.add_hparam("filter_size", 16)
    hparams.add_hparam("ffn_layer", "conv_hidden_relu")
    hparams.add_hparam("relu_dropout", 0.)
    conv_1d = tf.keras.layers.Conv1D(filters=hparams.hidden_size,
                                     kernel_size=1,
                                     use_bias=False)
    shifted_data = tf.pad(one_hot_data, [[0, 0], [1, 0], [0, 0]])[..., :-1, :]
    net = conv_1d(shifted_data)
    output = common_image_attention.transformer_decoder_layers(
        inputs=net,
        encoder_output=None,
        num_layers=hparams.num_layers,
        hparams=hparams,
        self_attention_bias=common_image_attention.get_self_attention_bias(net),
        attention_type=common_image_attention.AttentionType.GLOBAL)
    self.evaluate(tf.global_variables_initializer())
    output_val = self.evaluate(output)
    # The outputs for the padded dimension should be equal across all data.
    self.assertAllEqual(output_val[0, 0], output_val[1, 0])
    self.assertAllEqual(output_val[1, 0], output_val[2, 0])
    # The first and second elements of the batch are identical, so they should
    # have the same outputs for the second latent dimension as well.
    self.assertAllEqual(output_val[0, 1], output_val[1, 1])
if __name__ == "__main__":
  tf.test.main()  # Run all test cases in this module.
| 6,040 | 37.234177 | 80 | py |
sequer | sequer-main/code/tensor2tensor/tensor2tensor/layers/ngram.py | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""N-gram layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class NGram(tf.keras.layers.Layer):
  r"""N-gram layer.
  The layer takes as input an integer Tensor of shape [..., length], each
  element of which is a token index in [0, input_dim). It returns a real-valued
  Tensor of shape [..., num_ngrams], counting the number of times each n-gram
  appears in a batch element. The total number of n-grams is
  ```none
  num_ngrams = \sum_{minval <= n < maxval} input_dim^n.
  ```
  """
  def __init__(self, input_dim, minval, maxval, **kwargs):
    """Constructs layer.
    Args:
      input_dim: int > 0. Size of the vocabulary, i.e. maximum integer index +
        1.
      minval: Lowest inclusive value of n for computing n-grams. For example,
        setting it to 1 will compute starting from unigrams.
      maxval: Highest non-inclusive value of n for computing n-grams. For
        example, setting it to 3 will compute at most bigrams.
      **kwargs: kwargs of parent class.
    """
    super(NGram, self).__init__(**kwargs)
    self.input_dim = input_dim
    self.minval = minval
    self.maxval = maxval
  def call(self, inputs):
    batch_shape = tf.shape(inputs)[:-1]
    length = tf.shape(inputs)[-1]
    ngram_range_counts = []
    for n in range(self.minval, self.maxval):
      # Reshape inputs from [..., length] to [..., 1, length // n, n], dropping
      # remainder elements. Each n-vector is an ngram.
      # NOTE: this partitions the sequence into disjoint windows, so only
      # n-grams aligned to multiples of n are counted.
      reshaped_inputs = tf.reshape(
          inputs[..., :(n * (length // n))],
          tf.concat([batch_shape, [1], (length // n)[tf.newaxis], [n]], 0))
      # Count the number of times each ngram appears in the input. We do so by
      # checking whether each n-vector in the input is equal to each n-vector
      # in a Tensor of all possible ngrams. The comparison is batched between
      # the input Tensor of shape [..., 1, length // n, n] and the ngrams Tensor
      # of shape [..., input_dim**n, 1, n].
      ngrams = tf.reshape(
          list(np.ndindex((self.input_dim,) * n)),
          [1] * (len(inputs.shape)-1) + [self.input_dim**n, 1, n])
      # An n-vector matches an n-gram iff all n of its positions match.
      is_ngram = tf.equal(
          tf.reduce_sum(tf.cast(tf.equal(reshaped_inputs, ngrams), tf.int32),
                        axis=-1),
          n)
      ngram_counts = tf.reduce_sum(tf.cast(is_ngram, tf.float32), axis=-1)
      ngram_range_counts.append(ngram_counts)
    return tf.concat(ngram_range_counts, axis=-1)
  def compute_output_shape(self, input_shape):
    input_shape = tf.TensorShape(input_shape)
    num_ngrams = sum([self.input_dim**n
                      for n in range(self.minval, self.maxval)])
    return input_shape[:-1].concatenate(num_ngrams)
  def get_config(self):
    config = {'minval': self.minval,
              'maxval': self.maxval}
    base_config = super(NGram, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
| 3,605 | 37.774194 | 80 | py |
sequer | sequer-main/code/tensor2tensor/tensor2tensor/layers/modalities.py | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modalities, which specify a feature's domain.
T2TModel applies a default transformation to each feature according to its
modality. Override them by specifying a model's
hparams.{bottom,loss,top,weights_fn}.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range # pylint: disable=redefined-builtin
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_audio
from tensor2tensor.layers import common_image_attention as cia
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import common_video
from tensor2tensor.layers import discretization
import tensorflow as tf
import tensorflow_probability as tfp
class ModalityType(object):
  """Types of modalities.

  Each value names a (bottom, loss, top) family defined later in this file.
  """
  AUDIO = "audio"
  AUDIO_SPECTRAL = "audio_spectral"
  CLASS_LABEL = "class_label"
  CTC_SYMBOL = "ctc_symbol"  # symbol with CTC loss
  GENERIC_L2_LOSS = "generic_l2"  # identity modality with L2 loss
  IDENTITY = "identity"  # identity top and bottom
  IDENTITY_SYMBOL = "identity_symbol"  # symbol with identity top and bottom
  IMAGE = "image"
  # NOTE(review): per the name, the channel-compressed image path with an
  # identity bottom; the original comment here duplicated the one on
  # IMAGE_CHANNEL_COMPRESS below — confirm against the bottom registry.
  IMAGE_CHANNEL_BOTTOM_IDENTITY = "image_channel_bottom_identity"
  # images using channel compression for generation
  IMAGE_CHANNEL_COMPRESS = "image_channel_compress"
  IMAGE_CHANNEL_EMBEDDINGS_BOTTOM = "image_channel_embeddings_bottom"
  MULTI_LABEL = "multi_label"
  ONE_HOT_CLASS_LABEL = "one_hot_class_label"
  REAL = "real"  # real vectors
  REAL_L2_LOSS = "real_l2"  # real vectors with L2 as loss
  # real vectors with log Poisson regression loss
  REAL_LOG_POISSON_LOSS = "real_log_poisson"
  SIGMOID_CLASS_LABEL = "sigmoid_class_label"  # sigmoid cross-entropy loss
  # sigmoid cross-entropy applied on max-pooling over timesteps
  SIGMOID_MAX_POOLING_CLASS_LABEL = "sigmoid_max_pooling_class_label"
  # softmax cross-entropy applied on average-pooling over timesteps
  SOFTMAX_AVERAGE_POOLING_CLASS_LABEL = "softmax_average_pooling_class_label"
  # softmax cross-entropy applied on last-timestep encoding
  SOFTMAX_LAST_TIMESTEP_CLASS_LABEL = "softmax_last_timestep_class_label"
  # softmax cross-entropy applied on max-pooling over timesteps
  SOFTMAX_MAX_POOLING_CLASS_LABEL = "softmax_max_pooling_class_label"
  SPEECH_RECOGNITION = "speech_recognition"
  SYMBOL = "symbol"
  SYMBOL_WEIGHTS_ALL = "symbol_weights_all"  # symbol for features w/o 0-padding
  SYMBOL_ONE_HOT = "symbol_one_hot"  # symbol with one hot as embeddings
  VIDEO = "video"
  VIDEO_BITWISE = "video_bitwise"  # video where bottom embeds pixels bitwise
  VIDEO_IDENTITY = "video_identity"  # video with identity top and bottom
  # NOTE: the comments on the next two lines were originally swapped;
  # video_l1_loss uses tf.abs and video_l2_loss uses tf.squared_difference.
  VIDEO_L1 = "video_l1"  # video with L1 loss
  VIDEO_L2 = "video_l2"  # video with L2 loss
  # video with L1 loss and raw input (sequences of frames)
  VIDEO_L1_RAW = "video_l1_raw"
  # video with L2 loss and raw input (sequences of frames)
  VIDEO_L2_RAW = "video_l2_raw"
  # video with pixel noise on input during training
  VIDEO_PIXEL_NOISE = "video_pixel_noise"

  @staticmethod
  def get_choices():
    # All valid modality type strings, in alphabetical order.
    return [
        ModalityType.AUDIO,
        ModalityType.AUDIO_SPECTRAL,
        ModalityType.CLASS_LABEL,
        ModalityType.CTC_SYMBOL,
        ModalityType.GENERIC_L2_LOSS,
        ModalityType.IDENTITY,
        ModalityType.IDENTITY_SYMBOL,
        ModalityType.IMAGE,
        ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY,
        ModalityType.IMAGE_CHANNEL_COMPRESS,
        ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM,
        ModalityType.MULTI_LABEL,
        ModalityType.ONE_HOT_CLASS_LABEL,
        ModalityType.REAL,
        ModalityType.REAL_L2_LOSS,
        ModalityType.REAL_LOG_POISSON_LOSS,
        ModalityType.SIGMOID_CLASS_LABEL,
        ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL,
        ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL,
        ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL,
        ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL,
        ModalityType.SPEECH_RECOGNITION,
        ModalityType.SYMBOL,
        ModalityType.SYMBOL_ONE_HOT,
        ModalityType.SYMBOL_WEIGHTS_ALL,
        ModalityType.VIDEO,
        ModalityType.VIDEO_BITWISE,
        ModalityType.VIDEO_IDENTITY,
        ModalityType.VIDEO_L1,
        ModalityType.VIDEO_L2,
        ModalityType.VIDEO_L1_RAW,
        ModalityType.VIDEO_L2_RAW,
        ModalityType.VIDEO_PIXEL_NOISE,
    ]
# Bottom transformations, applied to all features
def audio_bottom(x, model_hparams, vocab_size):
  """Transform input from data space to model space.

  Stacks `model_hparams.audio_compression` strided Xception-style blocks
  (each halving both spatial dims) and a final block projecting to
  hidden_size.

  Args:
    x: A Tensor with shape [batch, ...]
    model_hparams: HParams, model hyperparmeters.
    vocab_size: int, vocabulary size.

  Returns:
    body_input: A Tensor with shape [batch, ?, ?,
      model_hparams.hidden_size].
  """
  del vocab_size  # unused arg
  inputs = x
  with tf.variable_scope("audio_modality"):
    # TODO(aidangomez): Will need to sort out a better audio pipeline
    def xnet_resblock(x, filters, res_relu, name):
      """Xception block: separable convs + max-pool with a strided 1x1
      residual branch."""
      with tf.variable_scope(name):
        # Typically audio samples are >100k samples in length and have a width
        # of 2 or 4. Mono audio has a single channel while stereo has 2.
        y = common_layers.separable_conv_block(
            x,
            filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))],
            first_relu=True,
            padding="SAME",
            force2d=True,
            name="sep_conv_block")
        y = common_layers.pool(y, (3, 3), "MAX", "SAME", strides=(2, 2))
        return y + common_layers.conv_block(
            x,
            filters, [((1, 1), (1, 1))],
            padding="SAME",
            strides=(2, 2),
            first_relu=res_relu,
            force2d=True,
            name="res_conv0")

    # Scale into [0, 1]; the /255 implies 8-bit sample values are expected
    # (assumption — confirm against the input pipeline).
    x = tf.to_float(inputs) / 255.
    x.set_shape([None, None, None, 1])
    for i in range(model_hparams.audio_compression):
      x = xnet_resblock(x, 2**(i + 1), True, "compress_block_%d" % i)
    return xnet_resblock(x,
                         model_hparams.hidden_size,
                         False,
                         "compress_block_final")
def audio_spectral_bottom(x, model_hparams, vocab_size):
  """Transform input from data space to model space.

  Like `audio_bottom`, but the input carries float32 spectral values bitcast
  to int32, and strides shrink only the length axis so the (small) spectral
  axis is preserved.

  Args:
    x: A Tensor with shape [batch, ...]
    model_hparams: HParams, model hyperparmeters.
    vocab_size: int, vocabulary size.

  Returns:
    body_input: A Tensor with shape [batch, ?, ?,
      model_hparams.hidden_size].
  """
  del vocab_size  # unused arg
  inputs = x
  with tf.variable_scope("audio_spectral_modality"):
    # TODO(aidangomez): Will need to sort out a better audio pipeline
    def xnet_resblock(x, filters, res_relu, name):
      """Xception-like block; strides (2, 1) downsample length only."""
      with tf.variable_scope(name):
        # We only stride along the length dimension to preserve the spectral
        # bins (which are tiny in dimensionality relative to length)
        y = common_layers.separable_conv_block(
            x,
            filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))],
            first_relu=True,
            padding="SAME",
            force2d=True,
            name="sep_conv_block")
        y = common_layers.pool(y, (3, 3), "MAX", "SAME", strides=(2, 1))
        return y + common_layers.conv_block(
            x,
            filters, [((1, 1), (1, 1))],
            padding="SAME",
            strides=(2, 1),
            first_relu=res_relu,
            force2d=True,
            name="res_conv0")

    # Bitcast back from int32 (the input pipeline packed float32 spectra
    # into int32 features).
    x = tf.bitcast(inputs, tf.float32)
    x.set_shape([None, None, None, 1])
    for i in range(model_hparams.audio_compression):
      x = xnet_resblock(x, 2**(i + 1), True, "compress_block_%d" % i)
    return xnet_resblock(x,
                         model_hparams.hidden_size,
                         False,
                         "compress_block_final")
def class_label_bottom(x, model_hparams, vocab_size):
  """Embed class-label ids into the model's hidden space."""
  scope = "class_label_modality_%d_%d" % (
      vocab_size, model_hparams.hidden_size)
  with tf.variable_scope(scope):
    # Optionally scale embeddings by sqrt(hidden_size).
    if model_hparams.multiply_embedding_mode == "sqrt_depth":
      multiplier = model_hparams.hidden_size**0.5
    else:
      multiplier = 1.0
    return common_layers.embedding(
        x, vocab_size, model_hparams.hidden_size, multiplier=multiplier)
def class_label_targets_bottom(x, model_hparams, vocab_size):
  """Target bottom for class labels: a zero tensor of shape [batch, 1, 1, hidden]."""
  scope = "class_label_modality_%d_%d" % (
      vocab_size, model_hparams.hidden_size)
  with tf.variable_scope(scope):
    batch = common_layers.shape_list(x)[0]
    return tf.zeros([batch, 1, 1, model_hparams.hidden_size])
def identity_bottom(x, model_hparams, vocab_size):
  """Identity bottom: pass the features through, cast to float."""
  del model_hparams, vocab_size  # unused arg
  return tf.to_float(x)
def image_bottom(x, model_hparams, vocab_size):
  """Image bottom: log an input summary (graph mode only), cast to float."""
  del model_hparams, vocab_size  # unused arg
  with tf.variable_scope("image_modality"):
    if not tf.executing_eagerly():
      summary_image = common_layers.tpu_safe_image_summary(x)
      tf.summary.image("inputs", summary_image, max_outputs=2)
    return tf.to_float(x)
def image_targets_bottom(x, model_hparams, vocab_size):
  """Bottom transformation for target images.

  Embeds each of the `vocab_size` possible intensity values per channel,
  then merges a pixel's per-channel embeddings into one hidden_size vector
  with a dense layer.
  """
  pixel_embedding_size = 64
  inputs = x
  with tf.variable_scope("image_modality"):
    if not tf.executing_eagerly():
      tf.summary.image(
          "targets_bottom",
          common_layers.tpu_safe_image_summary(inputs),
          max_outputs=1)
    inputs_shape = common_layers.shape_list(inputs)
    if len(inputs_shape) != 4:
      raise ValueError("Assuming images given as int tensors in the format "
                       "[batch, height, width, channels] (256 values).")
    # We embed each of 256=vocab_size possible pixel values.
    embedding_var = tf.get_variable(
        "pixel_embedding",
        [vocab_size, pixel_embedding_size])
    # Embedding lookup written as one-hot x embedding-matrix matmul
    # (equivalent to a gather over `embedding_var`).
    hot_inputs = tf.one_hot(tf.to_int32(inputs), vocab_size)
    hot_inputs = tf.reshape(hot_inputs, [-1, vocab_size])
    embedded = tf.matmul(hot_inputs, embedding_var)
    # Let's now merge all channels that were embedded into a single vector.
    merged_size = pixel_embedding_size * inputs_shape[3]
    embedded = tf.reshape(embedded, inputs_shape[:3] + [merged_size])
    merged = tf.layers.dense(
        embedded,
        model_hparams.hidden_size,
        name="merge_pixel_embedded_channels")
    return merged
def _image_channel_compress_bottom(inputs, model_hparams, name="bottom"):
  """Compresses channel-wise input pixels into whole pixel representions.

  Perform conversion of RGB pixel values to a real number in the range -1 to
  1. This combines pixel channels to form a representation of shape
  [img_len, img_len].

  Args:
    inputs: Tensor representing RGB pixel intensities as integers, of shape
      [batch, img_len, img_len, channels].
    model_hparams: HParams, model hyperparmeters.
    name: string, scope.

  Returns:
    body_input: Tensor of shape
      [batch, img_len, img_len, model_hparams.hidden_size].
  """
  num_channels = 3
  with tf.variable_scope(name):
    inputs = tf.to_float(inputs)
    hp = model_hparams
    if hp.mode != tf.estimator.ModeKeys.PREDICT:
      tf.summary.image(
          "inputs",
          common_layers.tpu_safe_image_summary(inputs),
          max_outputs=2)
    inputs = common_layers.convert_rgb_to_symmetric_real(inputs)

    # Reshape inputs to apply convolutions across [img_len, img_len*channels].
    inputs_shape = common_layers.shape_list(inputs)
    inputs = tf.reshape(
        inputs, [-1, inputs_shape[1], inputs_shape[2] * inputs_shape[3], 1])

    # Compress RGB intensities for each pixel using a convolution.
    # Kernel and stride of (1, num_channels) consume exactly one pixel's
    # channels per step, so the output width is img_len again.
    outputs = tf.layers.conv2d(
        inputs,
        model_hparams.hidden_size,
        kernel_size=(1, num_channels),
        padding="VALID",
        strides=(1, num_channels),
        activation=tf.nn.relu,
        name="conv_input")
    return outputs
def image_channel_compress_bottom(x, model_hparams, vocab_size):
  """Input bottom: compress RGB channels into whole-pixel representations."""
  del vocab_size  # unused arg
  return _image_channel_compress_bottom(x, model_hparams, name="input_bottom")
def image_channel_compress_targets_bottom(x, model_hparams, vocab_size):
  """Target bottom: compress RGB channels into whole-pixel representations."""
  del vocab_size  # unused arg
  return _image_channel_compress_bottom(x, model_hparams, name="output_bottom")
def image_channel_embeddings_bottom(x, model_hparams, vocab_size):
  """Per-channel embeddings for image targets, channels folded into width."""
  del vocab_size  # unused arg
  pixels = tf.to_int32(x)
  io_depth = model_hparams.num_channels
  hidden_size = model_hparams.hidden_size
  shape = common_layers.shape_list(pixels)
  channel_embeds = cia.get_channel_embeddings(
      io_depth, pixels, hidden_size, "input_bottom")
  return tf.reshape(
      channel_embeds, [shape[0], shape[1], shape[2] * io_depth, hidden_size])
def make_targets_bottom(bottom):
  """Wrap `bottom` so it runs inside a "targets_bottom" variable scope."""
  def targets_bottom(x, model_hparams, vocab_size):
    # Delegate to the wrapped bottom under the dedicated scope.
    with tf.variable_scope("targets_bottom"):
      return bottom(x, model_hparams, vocab_size)
  return targets_bottom
def real_bottom(x, model_hparams, vocab_size):
  """Project real-valued features to the model's hidden size."""
  del vocab_size  # unused arg
  with tf.variable_scope("real"):
    features = tf.to_float(x)
    return tf.layers.dense(features, model_hparams.hidden_size, name="bottom")
def speech_recognition_bottom(x, model_hparams, vocab_size):
  """Use batchnorm instead of CMVN and shorten the stft with strided convs.

  Args:
    x: float32 tensor with shape [batch_size, len, 1, freqs * channels]
    model_hparams: HParams, model hyperparmeters.
    vocab_size: int, vocabulary size.

  Returns:
    float32 tensor with shape [batch_size, shorter_len, 1, hidden_size]
  """
  del vocab_size  # unused arg
  inputs = x
  p = model_hparams

  num_mel_bins = p.audio_num_mel_bins
  num_channels = 3 if p.audio_add_delta_deltas else 1

  with tf.variable_scope("speech_recognition_modality"):
    if p.audio_preproc_in_bottom:
      # Compute filterbanks
      with tf.variable_scope("fbanks"):
        waveforms = tf.squeeze(inputs, [2, 3])
        mel_fbanks = common_audio.compute_mel_filterbank_features(
            waveforms,
            sample_rate=p.audio_sample_rate,
            dither=p.audio_dither,
            preemphasis=p.audio_preemphasis,
            frame_length=p.audio_frame_length,
            frame_step=p.audio_frame_step,
            lower_edge_hertz=p.audio_lower_edge_hertz,
            upper_edge_hertz=p.audio_upper_edge_hertz,
            num_mel_bins=p.audio_num_mel_bins,
            apply_mask=True)
        if p.audio_add_delta_deltas:
          mel_fbanks = common_audio.add_delta_deltas(mel_fbanks)
        x = tf.reshape(mel_fbanks,
                       common_layers.shape_list(mel_fbanks)[:2] +
                       [num_mel_bins, num_channels])

        # 1 for real frames, 0 for padding frames.
        nonpadding_mask = 1. - common_attention.embedding_to_padding(x)
        num_of_nonpadding_elements = tf.reduce_sum(
            nonpadding_mask) * num_mel_bins * num_channels

        # This replaces CMVN estimation on data
        # Mean/variance over time (axis 1), normalized by the non-padding
        # count; the variance uses the algebraic expansion
        # sum((x - m)^2) = N*m^2 - 2*m*sum(x) + sum(x^2).
        var_epsilon = 1e-09
        mean = tf.reduce_sum(
            x, axis=[1], keepdims=True) / num_of_nonpadding_elements
        variance = (num_of_nonpadding_elements * mean**2. -
                    2. * mean * tf.reduce_sum(x, axis=[1], keepdims=True) +
                    tf.reduce_sum(x**2, axis=[1], keepdims=True)
                   ) / num_of_nonpadding_elements
        # Normalize and re-zero the padding frames.
        x = (x - mean) * tf.rsqrt(variance + var_epsilon) * tf.expand_dims(
            nonpadding_mask, -1)
    else:
      x = inputs

    # The convention is that the models are flattened along the spatial,
    # dimensions, thus the speech preprocessor treats frequencies and
    # channels as image colors (last axis)
    x.set_shape([None, None, num_mel_bins, num_channels])

    # TODO(chorowski): how to specify bottom's hparams and avoid hardcoding?
    # Pad the time axis before the two stride-2 valid convs below
    # (presumably so trailing frames are not dropped — confirm).
    x = tf.pad(x, [[0, 0], [0, 8], [0, 0], [0, 0]])
    for _ in range(2):
      x = tf.layers.conv2d(
          x, 128, (3, 3), (2, 2), use_bias=False)
      x = common_layers.layer_norm(x)
      x = tf.nn.relu(x)

    xshape = common_layers.shape_list(x)
    # apply a conv that will remove all frequencies and at the same time
    # project the output into desired hidden_size
    x = tf.pad(x, [[0, 0], [0, 2], [0, 0], [0, 0]])
    x = tf.layers.conv2d(x, p.hidden_size, (3, xshape[2]), use_bias=False)
    assert common_layers.shape_list(x)[2] == 1
    x = common_layers.layer_norm(x)
    x = tf.nn.relu(x)
  return x
def get_weights(model_hparams, vocab_size, hidden_dim=None):
  """Create or get concatenated embedding or softmax variable.

  The vocabulary rows are sharded over `symbol_modality_num_shards`
  variables named "weights_%d" and concatenated back together.

  Args:
    model_hparams: HParams, model hyperparmeters.
    vocab_size: int, vocabulary size.
    hidden_dim: dim of the variable. Defaults to model_hparams' hidden_size.

  Returns:
    a single Tensor of shape [vocab_size, hidden_dim] (the concatenation of
    all shards).
  """
  if hidden_dim is None:
    hidden_dim = model_hparams.hidden_size
  num_shards = model_hparams.symbol_modality_num_shards
  shards = []
  for shard_id in range(num_shards):
    # The first (vocab_size % num_shards) shards take one extra row each.
    rows = vocab_size // num_shards + int(shard_id < vocab_size % num_shards)
    shards.append(
        tf.get_variable(
            "weights_%d" % shard_id, [rows, hidden_dim],
            initializer=tf.random_normal_initializer(0.0, hidden_dim**-0.5)))
  ret = shards[0] if num_shards == 1 else tf.concat(shards, 0)
  # Convert ret to tensor.
  if not tf.executing_eagerly():
    ret = common_layers.convert_gradient_to_tensor(ret)
  return ret
def _symbol_bottom_simple(x, model_hparams, vocab_size, name, reuse):
  """Bottom transformation for symbols.

  Looks up embeddings for integer ids, with optional whole-symbol dropout
  and sqrt(depth) scaling; embeddings of id 0 (padding) are zeroed out.

  Args:
    x: int Tensor of symbol ids; a trailing size-1 axis is squeezed/added so
      the lookup runs on a 3-D tensor.
    model_hparams: HParams, model hyperparameters.
    vocab_size: int, vocabulary size.
    name: str, variable scope name.
    reuse: whether to reuse variables in the scope.

  Returns:
    float Tensor of embeddings with a trailing hidden_size axis.
  """
  with tf.variable_scope(name, reuse=reuse):
    # Ensure the inputs are 3-D
    if len(x.get_shape()) == 4:
      x = tf.squeeze(x, axis=3)
    while len(x.get_shape()) < 3:
      x = tf.expand_dims(x, axis=-1)

    var = get_weights(model_hparams, vocab_size)
    # Drops whole symbol ids to 0 without rescaling the survivors.
    x = common_layers.dropout_no_scaling(
        x, 1.0 - model_hparams.symbol_dropout)
    ret = common_layers.gather(var, x)
    if model_hparams.multiply_embedding_mode == "sqrt_depth":
      ret *= model_hparams.hidden_size**0.5
    # Zero the embeddings wherever the id is 0 (padding).
    ret *= tf.expand_dims(
        common_layers.cast_like(tf.not_equal(x, 0), ret), -1)
    return ret
def symbol_bottom(x, model_hparams, vocab_size):
  """Bottom for input symbols; uses the shared embedding if configured."""
  shared = (model_hparams.shared_embedding_and_softmax_weights or
            model_hparams.get("shared_embedding"))
  scope_name = "shared" if shared else "input_emb"
  return _symbol_bottom_simple(
      x, model_hparams, vocab_size, scope_name, reuse=None)
def symbol_targets_bottom(x, model_hparams, vocab_size):
  """Bottom transformation for target symbols."""
  shared = (model_hparams.shared_embedding_and_softmax_weights or
            model_hparams.get("shared_embedding"))
  if not shared:
    return _symbol_bottom_simple(
        x, model_hparams, vocab_size, "target_emb", reuse=None)
  try:
    return _symbol_bottom_simple(
        x, model_hparams, vocab_size, "shared", reuse=True)
  except ValueError:
    # perhaps there were no inputs, and this is a new variable.
    return _symbol_bottom_simple(
        x, model_hparams, vocab_size, "shared", reuse=None)
def symbol_one_hot_bottom(x, model_hparams, vocab_size):
  """One-hot encode symbol ids instead of using a learned embedding."""
  del model_hparams  # unused arg
  return tf.one_hot(x, vocab_size)
def video_bottom(x, model_hparams, vocab_size):
  """Summarize and standardize input video frames."""
  del model_hparams, vocab_size  # unused arg
  common_video.gif_summary("inputs", x, max_outputs=1)
  return common_layers.standardize_images(x)
def video_targets_bottom(x, model_hparams, vocab_size):
  """Summarize and standardize target video frames."""
  del model_hparams, vocab_size  # unused arg
  common_video.gif_summary("targets", x, max_outputs=1)
  return common_layers.standardize_images(x)
def video_bitwise_bottom(x, model_hparams, vocab_size):
  """Embed video pixels bitwise (8 bits per intensity), then project."""
  pixel_embedding_size = 64
  with tf.variable_scope("video_modality_bitwise", reuse=tf.AUTO_REUSE):
    common_layers.summarize_video(x, "bottom")
    # Bitwise embedding assumes 8-bit (0-255) pixel values.
    assert vocab_size == 256
    bit_embeds = discretization.int_to_bit_embed(x, 8, pixel_embedding_size)
    return tf.layers.dense(
        bit_embeds,
        model_hparams.hidden_size,
        name="merge_pixel_embedded_frames")
def video_bitwise_targets_bottom(x, model_hparams, vocab_size):
  """Bitwise-embed target video, fold time into channels, then project."""
  pixel_embedding_size = 64
  with tf.variable_scope("video_modality_bitwise", reuse=tf.AUTO_REUSE):
    common_layers.summarize_video(x, "targets_bottom")
    # Bitwise embedding assumes 8-bit (0-255) pixel values.
    assert vocab_size == 256
    bit_embeds = discretization.int_to_bit_embed(x, 8, pixel_embedding_size)
    # Move the time axis into the channel axis before projecting.
    stacked = common_layers.time_to_channels(bit_embeds)
    return tf.layers.dense(
        stacked,
        model_hparams.hidden_size,
        name="merge_pixel_embedded_frames")
def video_identity_bottom(x, model_hparams, vocab_size):
  """Identity bottom for video: summarize inputs and pass them through."""
  del model_hparams, vocab_size  # unused arg
  common_video.gif_summary("inputs", x, max_outputs=1)
  return x
def video_identity_targets_bottom(x, model_hparams, vocab_size):
  """Identity bottom for target video: summarize and pass through."""
  del model_hparams, vocab_size  # unused arg
  common_video.gif_summary("targets", x, max_outputs=1)
  return x
def video_pixel_noise_bottom(x, model_hparams, vocab_size):
  """Bottom transformation for video.

  During training, replaces a random fraction
  (`video_modality_input_noise`, default 0.25) of input pixels with the
  per-channel median value before delegating to `video_bottom`.
  """
  input_noise = getattr(model_hparams, "video_modality_input_noise", 0.25)
  inputs = x
  if model_hparams.mode == tf.estimator.ModeKeys.TRAIN:
    # Per-channel median (50th percentile) over batch/time/height/width
    # acts as the "background" replacement value.
    background = tfp.stats.percentile(inputs, 50., axis=[0, 1, 2, 3])
    input_shape = common_layers.shape_list(inputs)
    input_size = tf.reduce_prod(input_shape[:-1])
    # Bernoulli mask per pixel: 0 (replace) with prob input_noise, else 1.
    input_mask = tf.multinomial(
        tf.log([[input_noise, 1.-input_noise]]), input_size)
    input_mask = tf.reshape(tf.cast(input_mask, tf.int32),
                            input_shape[:-1]+[1])
    inputs = inputs * input_mask + background * (1 - input_mask)
  return video_bottom(inputs, model_hparams, vocab_size)
def convert_rgb_to_real(prediction, targets):
  """Convert prediction and target from rgb to real."""
  squeezed = tf.squeeze(prediction, axis=-1)
  real_prediction = common_layers.convert_rgb_to_real(squeezed)
  real_targets = common_layers.convert_rgb_to_real(targets)
  return real_prediction, real_targets
def video_raw_bottom(x, model_hparams, vocab_size):
  """Summarize raw input frames and map RGB values into real space."""
  del model_hparams, vocab_size  # unused arg
  common_video.gif_summary("inputs", x)
  return common_layers.convert_rgb_to_real(x)
def video_raw_targets_bottom(x, model_hparams, vocab_size):
  """Summarize raw target frames and map RGB values into real space."""
  del model_hparams, vocab_size  # unused arg
  common_video.gif_summary("targets_bottom", x)
  return common_layers.convert_rgb_to_real(x)
# Loss transformations, applied to target features
def ctc_symbol_loss(top_out, targets, model_hparams, vocab_size, weight_fn):
  """Compute the CTC loss.

  Returns (sum of CTC losses, sum of target weights).

  NOTE(review): the last parameter is named `weight_fn` while every sibling
  loss function uses `weights_fn`; kept as-is so keyword callers don't break.
  """
  del model_hparams, vocab_size  # unused arg
  logits = top_out
  with tf.name_scope("ctc_loss", values=[logits, targets]):
    # For CTC we assume targets are 1d, [batch, length, 1, 1] here.
    targets_shape = targets.get_shape().as_list()
    assert len(targets_shape) == 4
    assert targets_shape[2] == 1
    assert targets_shape[3] == 1
    targets = tf.squeeze(targets, axis=[2, 3])
    logits = tf.squeeze(logits, axis=[2, 3])
    # Id 0 is padding; each sequence length is its count of non-zero ids.
    targets_mask = 1 - tf.to_int32(tf.equal(targets, 0))
    targets_lengths = tf.reduce_sum(targets_mask, axis=1)
    # tf.nn.ctc_loss requires sparse labels.
    sparse_targets = tf.keras.backend.ctc_label_dense_to_sparse(
        targets, targets_lengths)
    xent = tf.nn.ctc_loss(
        sparse_targets,
        logits,
        targets_lengths,
        time_major=False,
        preprocess_collapse_repeated=False,
        ctc_merge_repeated=False)
    weights = weight_fn(targets)
    return tf.reduce_sum(xent), tf.reduce_sum(weights)
def generic_loss(top_out, targets, model_hparams, vocab_size, weights_fn):
  """Compute loss numerator and denominator for one shard of output."""
  del vocab_size  # unused arg
  # Upcast logits if the hparams request it (e.g. bfloat16 -> float32).
  logits = common_attention.maybe_upcast(top_out, hparams=model_hparams)
  return common_layers.padded_cross_entropy(
      logits,
      targets,
      model_hparams.label_smoothing,
      cutoff=getattr(model_hparams, "video_modality_loss_cutoff", 0.0),
      weights_fn=weights_fn)
def generic_l2_loss(body_output,
                    targets,
                    model_hparams,
                    vocab_size,
                    weights_fn):
  """Mean squared error against float targets; denominator is constant 1."""
  del model_hparams, vocab_size, weights_fn  # unused arg
  squared_error = tf.squared_difference(body_output, tf.to_float(targets))
  return tf.reduce_mean(squared_error), tf.constant(1.0)
def multi_label_loss(top_out, targets, model_hparams, vocab_size, weights_fn):
  """Average loss over the labels.

  The single prediction in `top_out` is tiled across the per-example label
  axis, cross-entropy is averaged over each example's labels, and examples
  with zero label weight are excluded.
  """
  del vocab_size  # unused arg
  logits = top_out
  # Broadcast the one prediction against all labels of an example.
  num_labels = tf.shape(targets)[1]
  logits = tf.tile(logits, [1, num_labels, 1, 1, 1])

  xent, weights = common_layers.padded_cross_entropy(
      logits,
      targets,
      model_hparams.label_smoothing,
      weights_fn=weights_fn,
      reduce_sum=False,
  )
  xent = tf.squeeze(xent, [2, 3])
  weights = tf.squeeze(weights, [2, 3])
  # average loss over all labels
  loss = tf.reduce_sum(xent, axis=1)
  weights = tf.reduce_sum(weights, axis=1)
  # Epsilon guards against division by zero for examples with no labels.
  loss /= (weights + 1e-8)
  # Each example with at least one weighted label counts once.
  weights = tf.to_float(tf.greater(weights, 0.))

  return tf.reduce_sum(loss*weights), tf.reduce_sum(weights)
def one_hot_class_label_loss(top_out,
                             targets,
                             model_hparams,
                             vocab_size,
                             weights_fn):
  """Apply softmax cross-entropy between outputs and targets.

  Args:
    top_out: logits Tensor with shape [batch, ?, ?, num_classes]
    targets: one-hot encoding Tensor with shape [batch, ?, ?, num_classes]
    model_hparams: HParams, model hyperparmeters.
    vocab_size: int, vocabulary size.
    weights_fn: function mapping targets to a weight Tensor.

  Returns:
    loss_scale (cross-entropy), loss_denom (sum of weights)
  """
  del model_hparams, vocab_size  # unused arg
  xent = tf.losses.softmax_cross_entropy(
      onehot_labels=targets, logits=top_out)
  denom = tf.reduce_sum(weights_fn(targets))
  return xent, denom
def real_l2_loss(top_out, targets, model_hparams, vocab_size, weights_fn):
  """Weighted squared error for real-valued predictions."""
  del model_hparams, vocab_size  # unused arg
  predictions = top_out
  # Drop the trailing axis when predictions have one more rank than targets.
  if (len(common_layers.shape_list(top_out)) != len(
      common_layers.shape_list(targets))):
    predictions = tf.squeeze(top_out, axis=[-1])
  with tf.name_scope("l2"):
    weights = weights_fn(targets)
    squared = tf.pow(predictions - targets, 2)
    return tf.reduce_sum(squared * weights), tf.reduce_sum(weights)
def real_log_poisson_loss(top_out,
                          targets,
                          model_hparams,
                          vocab_size,
                          weights_fn):
  """Weighted log-Poisson regression loss for real-valued predictions."""
  del model_hparams, vocab_size  # unused arg
  predictions = top_out
  # Drop the trailing axis when predictions have one more rank than targets.
  if (len(common_layers.shape_list(top_out)) != len(
      common_layers.shape_list(targets))):
    predictions = tf.squeeze(top_out, axis=[-1])
  with tf.name_scope("log_possion"):  # (sic) kept for graph-name stability
    weights = weights_fn(targets)
    lp_loss = tf.nn.log_poisson_loss(targets, predictions)
    return tf.reduce_sum(lp_loss * weights), tf.reduce_sum(weights)
def sigmoid_class_label_loss(top_out,
                             targets,
                             model_hparams,
                             vocab_size,
                             weights_fn):
  """Sigmoid cross-entropy loss for (multi-)binary class labels.

  Expects inputs of size [batch-size, timesteps, 1, num-classes], where the
  last dimension holds logits for independent binary labels.
  """
  del model_hparams, vocab_size  # unused arg
  xent = tf.losses.sigmoid_cross_entropy(
      multi_class_labels=targets, logits=top_out)
  denom = tf.reduce_sum(weights_fn(targets))
  return xent, denom
def sigmoid_max_pooling_class_label_loss(top_out,
                                         targets,
                                         model_hparams,
                                         vocab_size,
                                         weights_fn):
  """Sigmoid cross-entropy loss for max-pooled class labels.

  Expects inputs of size [batch-size, 1, 1, num-classes]; the max-pooling
  over timesteps is presumably done by the corresponding top transformation,
  so the loss body matches `sigmoid_class_label_loss`.
  """
  del model_hparams, vocab_size  # unused arg
  xent = tf.losses.sigmoid_cross_entropy(
      multi_class_labels=targets, logits=top_out)
  denom = tf.reduce_sum(weights_fn(targets))
  return xent, denom
def symbol_one_hot_loss(top_out,
                        targets,
                        model_hparams,
                        vocab_size,
                        weights_fn):
  """Mean softmax cross-entropy against one-hot encoded symbol targets."""
  del model_hparams, weights_fn  # unused arg
  onehot_targets = tf.one_hot(targets, vocab_size)
  xent = tf.nn.softmax_cross_entropy_with_logits(
      logits=top_out, labels=onehot_targets)
  return tf.reduce_mean(xent), tf.constant(1.0)
def video_loss(top_out, targets, model_hparams, vocab_size, weights_fn):
  """Compute loss numerator and denominator for one shard of output."""
  del vocab_size  # unused arg
  # Fold the time axis into the batch axis on both sides.
  flat_logits = tf.reshape(
      top_out, [-1] + common_layers.shape_list(top_out)[2:])
  flat_targets = tf.reshape(
      targets, [-1] + common_layers.shape_list(targets)[2:])
  return common_layers.padded_cross_entropy(
      flat_logits,
      flat_targets,
      model_hparams.label_smoothing,
      cutoff=getattr(model_hparams, "video_modality_loss_cutoff", 0.01),
      weights_fn=weights_fn)
def video_identity_loss(top_out,
                        targets,
                        model_hparams,
                        vocab_size,
                        weights_fn):
  """Compute loss numerator and denominator for one shard of output."""
  del vocab_size  # unused arg
  # TODO(nikip): Try L2 loss
  # Fold the time axis into the batch axis on both sides.
  flat_logits = tf.reshape(
      top_out, [-1] + common_layers.shape_list(top_out)[2:])
  flat_targets = tf.reshape(
      targets, [-1] + common_layers.shape_list(targets)[2:])
  return common_layers.padded_cross_entropy(
      flat_logits,
      flat_targets,
      model_hparams.label_smoothing,
      cutoff=getattr(model_hparams, "video_modality_loss_cutoff", 0.01),
      weights_fn=weights_fn)
def video_l1_internal_loss(logits, targets, model_hparams):
  """Hinged absolute error: deviations below the cutoff cost nothing."""
  cutoff = getattr(model_hparams, "video_modality_loss_cutoff", 0.2)
  return tf.nn.relu(tf.abs(logits - targets) - cutoff)
def video_l1_loss(top_out, targets, model_hparams, vocab_size, weights_fn):
  """Compute loss numerator and denominator for one shard of output."""
  del vocab_size  # unused arg
  logits = top_out
  # Fold time into batch; [2:-1] also drops the trailing logits axis so
  # logits align with targets (that axis presumably has size 1 — confirm).
  logits = tf.reshape(logits, [-1] + common_layers.shape_list(logits)[2:-1])
  targets = tf.reshape(targets, [-1] + common_layers.shape_list(targets)[2:])
  weights = weights_fn(targets)
  # Shift targets by 0.5 so later just casting to int gives the prediction.
  # So for int targets, say 0 and 7, we actually train to predict 0.5 and 7.5.
  # Later (in merics or infer) this is cast to int anyway. Also, we have no
  # loss beyond cutoff = 0.2 as these are already correct predictions.
  targets = tf.to_float(targets) + 0.5
  loss = video_l1_internal_loss(logits, targets, model_hparams)
  return tf.reduce_sum(loss * weights), tf.reduce_sum(weights)
def video_l2_internal_loss(logits, targets, model_hparams):
  """Hinged squared error: deviations below the cutoff cost nothing."""
  cutoff = getattr(model_hparams, "video_modality_loss_cutoff", 0.2)
  return tf.nn.relu(
      tf.squared_difference(logits, targets) - cutoff * cutoff)
def video_l2_loss(top_out, targets, model_hparams, vocab_size, weights_fn):
  """Compute loss numerator and denominator for one shard of output."""
  del vocab_size  # unused arg
  logits = top_out
  # Fold time into batch; [2:-1] also drops the trailing logits axis so
  # logits align with targets (that axis presumably has size 1 — confirm).
  logits = tf.reshape(logits, [-1] + common_layers.shape_list(logits)[2:-1])
  targets = tf.reshape(targets, [-1] + common_layers.shape_list(targets)[2:])
  weights = weights_fn(targets)
  # Shift targets by 0.5 so later just casting to int gives the prediction.
  # So for int targets, say 0 and 7, we actually train to predict 0.5 and 7.5.
  # Later (in merics or infer) this is cast to int anyway. Also, we have no
  # loss beyond cutoff = 0.2 as these are already correct predictions.
  targets = tf.to_float(targets) + 0.5
  loss = video_l2_internal_loss(logits, targets, model_hparams)
  return tf.reduce_sum(loss * weights), tf.reduce_sum(weights)
def video_l2_raw_loss(top_out, targets, model_hparams, vocab_size, weights_fn):
  """Mean squared error in real (RGB-converted) space for raw video."""
  del model_hparams, vocab_size, weights_fn  # unused arg
  prediction, groundtruth = convert_rgb_to_real(top_out, targets)
  mse = tf.losses.mean_squared_error(prediction, groundtruth)
  return mse, tf.constant(1.0)
def video_l1_raw_loss(top_out, targets, model_hparams, vocab_size, weights_fn):
  """Mean absolute error in real (RGB-converted) space for raw video."""
  del model_hparams, vocab_size, weights_fn  # unused arg
  prediction, groundtruth = convert_rgb_to_real(top_out, targets)
  mae = tf.losses.absolute_difference(prediction, groundtruth)
  return mae, tf.constant(1.0)
# Top transformations, applied to target features
def is_pointwise(func):
  """Decorator for whether the function is pointwise.

  An example of a pointwise function is a linear layer followed by
  a softmax. Given a tensor [batch, length, height, depth] it operates
  only on the last axis, on every point in [batch, length, height] fully
  independently. In contrast, a classifier that first averages over length
  and height is not pointwise, as it depends on the whole field. It is useful
  to know if top functions are pointwise to speed up decoding in certain models.

  Args:
    func: Function to decorate.

  Returns:
    Original function with an attribute pointwise set to True.
  """
  setattr(func, "pointwise", True)
  return func
def class_label_top(body_output, targets, model_hparams, vocab_size):
  """Transform inputs from model space to target space.

  Averages over the inner spatial dims, then applies a linear layer to
  produce class logits.

  Args:
    body_output: A Tensor with shape [batch, ?, ?, body_output_size].
    targets: Unused.
    model_hparams: HParams, model hyperparameters.
    vocab_size: int, vocabulary size.

  Returns:
    A Tensor with shape [batch_size, 1, 1, 1, vocab_size].
  """
  del targets  # unused arg
  scope_name = "class_label_modality_%d_%d" % (vocab_size,
                                               model_hparams.hidden_size)
  with tf.variable_scope(scope_name):
    pooled = tf.reduce_mean(body_output, axis=[1, 2], keepdims=True)
    logits = tf.layers.dense(pooled, vocab_size)
    return tf.expand_dims(logits, 3)
def identity_top(body_output, targets, model_hparams, vocab_size):
  """Identity top transformation: returns the body output unchanged."""
  del targets, model_hparams, vocab_size  # unused arg
  return body_output
def image_top(body_output, targets, model_hparams, vocab_size):
  """Top transformation for images: per-channel vocabulary logits."""
  del targets  # unused arg
  # TODO(lukaszkaiser): is this a universal enough way to get channels?
  num_channels = model_hparams.problem.num_channels
  with tf.variable_scope("rgb_softmax"):
    out_shape = common_layers.shape_list(body_output)
    dense_out = tf.layers.dense(body_output, vocab_size * num_channels)
    logits = tf.reshape(dense_out, out_shape[:3] + [num_channels, vocab_size])
    # Only write the image summary for the primary (non-reused) scope.
    if not tf.get_variable_scope().reuse:
      tf.summary.image(
          "result",
          common_layers.tpu_safe_image_summary(tf.argmax(logits, axis=-1)),
          max_outputs=1)
    return logits
def image_channel_compress_top(body_output, targets, model_hparams, vocab_size):
  """Transforms body output to return logits.

  Args:
    body_output: Tensor of shape [batch, img_len, img_len, depth].
    targets: Unused.
    model_hparams: HParams, model hyperparameters.
    vocab_size: int, vocabulary size.

  Returns:
    Tensor of shape [batch, img_len, img_len, channels, vocab_size].
  """
  del targets  # unused arg
  with tf.variable_scope("image_channel_compress_modality"):
    hidden_size = model_hparams.hidden_size
    img_len = model_hparams.img_len
    channels = 3  # RGB
    batch = common_layers.shape_list(body_output)[0]
    # 1x1 conv expands depth so each RGB channel gets its own hidden slice.
    net = tf.layers.conv2d(
        body_output,
        hidden_size * channels,
        kernel_size=(1, 1),
        strides=(1, 1),
        padding="VALID",
        activation=tf.nn.relu,
        name="decompress_conv")
    net = tf.reshape(net, [batch, img_len, img_len * channels, hidden_size])
    net = common_layers.layer_preprocess(net, model_hparams)
    # Project every channel position to vocabulary logits.
    net = tf.layers.dense(
        net, vocab_size, use_bias=True, activation=None, name="output_conv")
    return tf.reshape(net, [batch, img_len, img_len, channels, vocab_size])
def image_channel_embeddings_top(body_output,
                                 targets,
                                 model_hparams,
                                 vocab_size):
  """Top transformation for images.

  Projects the body output to per-channel vocabulary logits.

  Args:
    body_output: Tensor whose positions cover img_len x img_len x channels.
    targets: Unused.
    model_hparams: HParams, model hyperparameters.
    vocab_size: int, vocabulary size.

  Returns:
    Tensor of shape [-1, img_len, img_len, channels, vocab_size].
  """
  del targets  # unused arg
  with tf.variable_scope("image_channel_embeddings_bottom"):
    img_len = model_hparams.img_len
    channels = model_hparams.num_channels
    # Project to vocab_size logits per position. The previous hard-coded 256
    # output width only matched the reshape below when vocab_size == 256;
    # for any other vocab the reshape silently mis-folded the batch axis.
    # Using vocab_size keeps the dense output consistent with the target
    # shape for every vocabulary size (identical behavior at vocab_size=256).
    x = tf.layers.dense(
        body_output, vocab_size, use_bias=True, activation=None,
        name="output_conv")
    x = tf.reshape(x,
                   [-1, img_len, img_len, channels, vocab_size])
    return x
@is_pointwise
def real_top(body_output, targets, model_hparams, vocab_size):
  """Dense projection to vocab_size for real-valued modalities."""
  del targets, model_hparams  # unused arg
  with tf.variable_scope("real"):
    return tf.layers.dense(body_output, vocab_size, name="top")
def sigmoid_max_pooling_class_label_top(body_output,
                                        targets,
                                        model_hparams,
                                        vocab_size):
  """Transform inputs from model space to target space.

  Max-pools over timesteps, then applies a linear layer to logits.

  Args:
    body_output: A Tensor with shape [batch, timesteps, 1, body_output_size].
    targets: Unused.
    model_hparams: HParams, model hyperparameters.
    vocab_size: int, vocabulary size.

  Returns:
    A Tensor with shape [batch_size, 1, 1, vocab_size].
  """
  del targets  # unused arg
  scope = "sigmoid_max_pooling_class_symbol_modality_%d_%d" % (
      vocab_size, model_hparams.hidden_size)
  with tf.variable_scope(scope):
    pooled = tf.reduce_max(body_output, axis=1, keepdims=True)
    return tf.layers.dense(pooled, vocab_size)
def softmax_average_pooling_class_label_top(body_output,
                                            targets,
                                            model_hparams,
                                            vocab_size):
  """Class-label top: mean-pool over timesteps, then a linear layer."""
  del targets  # unused arg
  scope = "softmax_average_pooling_onehot_class_label_modality_%d_%d" % (
      vocab_size, model_hparams.hidden_size)
  with tf.variable_scope(scope):
    pooled = tf.reduce_mean(body_output, axis=1, keepdims=True)
    return tf.layers.dense(pooled, vocab_size)
def softmax_last_timestep_class_label_top(body_output,
                                          targets,
                                          model_hparams,
                                          vocab_size):
  """Class-label top: take the last timestep, then a linear layer."""
  del targets  # unused arg
  scope = "softmax_last_timestep_onehot_class_label_modality_%d_%d" % (
      vocab_size, model_hparams.hidden_size)
  with tf.variable_scope(scope):
    last_step = tf.expand_dims(body_output[:, -1], 1)  # Pick the last timestep
    return tf.layers.dense(last_step, vocab_size)
def softmax_max_pooling_class_label_top(body_output,
                                        targets,
                                        model_hparams,
                                        vocab_size):
  """Class-label top: max-pool over timesteps, then a linear layer."""
  del targets  # unused arg
  scope = "softmax_max_pooling_onehot_class_label_modality_%d_%d" % (
      vocab_size, model_hparams.hidden_size)
  with tf.variable_scope(scope):
    pooled = tf.reduce_max(body_output, axis=1, keepdims=True)
    return tf.layers.dense(pooled, vocab_size)
@is_pointwise
def symbol_top(body_output, targets, model_hparams, vocab_size):
  """Generate logits.

  Args:
    body_output: A Tensor with shape
      [batch, p0, p1, model_hparams.hidden_size].
    targets: Unused.
    model_hparams: HParams, model hyperparmeters.
    vocab_size: int, vocabulary size.

  Returns:
    logits: A Tensor with shape [batch, p0, p1, ?, vocab_size].
  """
  del targets  # unused arg
  # When embedding and softmax weights are tied, reuse the "shared" scope so
  # get_weights resolves to the same variables as the embedding bottom.
  if model_hparams.shared_embedding_and_softmax_weights:
    scope_name = "shared"
    reuse = tf.AUTO_REUSE
  else:
    scope_name = "softmax"
    reuse = False
  with tf.variable_scope(scope_name, reuse=reuse):
    body_output_shape = common_layers.shape_list(body_output)
    var = get_weights(model_hparams, vocab_size, body_output_shape[-1])
    if (model_hparams.factored_logits and
        model_hparams.mode == tf.estimator.ModeKeys.TRAIN):
      # insert channels dimension
      body_output = tf.expand_dims(body_output, 3)
      # Presumably defers the full matmul to the loss computation — see
      # common_layers.FactoredTensor for the actual contract.
      return common_layers.FactoredTensor(body_output, var)
    else:
      # Flatten to 2-D, project onto the (tied or softmax) weight matrix,
      # then restore the leading dims with an extra channel axis of size 1.
      body_output = tf.reshape(body_output, [-1, body_output_shape[-1]])
      logits = tf.matmul(body_output, var, transpose_b=True)
      return tf.reshape(logits,
                        body_output_shape[:-1] + [1, vocab_size])
@is_pointwise
def symbol_one_hot_top(body_output, targets, model_hparams, vocab_size):
  """Identity top for one-hot symbols: the body output is already logits."""
  del targets, model_hparams, vocab_size  # unused arg
  return body_output
def video_top(body_output, targets, model_hparams, vocab_size):
  """Top transformation for video: reshape to per-channel logits."""
  del targets  # unused arg
  num_channels = model_hparams.problem.num_channels
  shape = common_layers.shape_list(body_output)
  res = tf.reshape(body_output, shape[:-1] + [num_channels, vocab_size])
  # Calculate argmax so as to have a summary with the produced images.
  argmax_pixels = tf.argmax(tf.reshape(res, [-1, vocab_size]), axis=-1)
  common_video.gif_summary(
      "results",
      tf.reshape(argmax_pixels, shape[:-1] + [num_channels]),
      max_outputs=1)
  return res
def video_l1_top(body_output, targets, model_hparams, vocab_size):
  """Top transformation for video trained with L1/L2 losses."""
  del targets, vocab_size  # unused arg
  num_channels = model_hparams.problem.num_channels
  num_frames = model_hparams.video_num_target_frames
  with tf.variable_scope("rgb"):
    body_shape = common_layers.shape_list(body_output)
    res = tf.layers.dense(body_output, num_channels * num_frames, name="cast")
    res = tf.reshape(res, body_shape[:3] + [num_channels, num_frames])
    res = tf.transpose(res, [0, 4, 1, 2, 3])  # Move frames next to batch.
    # Summarize the final frame unless this scope is being reused.
    if not tf.get_variable_scope().reuse:
      tf.summary.image(
          "result",
          common_layers.tpu_safe_image_summary(res[:, -1, :, :, :]),
          max_outputs=1)
    return tf.expand_dims(res, axis=-1)  # Add an axis like in perplexity.
def video_raw_top(body_output, targets, model_hparams, vocab_size):
  """Top transformation for raw video: convert real frames to RGB."""
  del targets, model_hparams, vocab_size  # unused arg
  if isinstance(body_output, list):
    frames = tf.stack(body_output, axis=1)
  else:
    frames = body_output
  rgb_frames = common_layers.convert_real_to_rgb(frames)
  common_video.gif_summary("body_output", rgb_frames)
  return tf.expand_dims(rgb_frames, axis=-1)
# Utility functions similar to tf.keras for default transformations
def get_bottom(modality_type, value=None):
  """Gets default bottom transformation; if none available, return value."""
  # Table-driven dispatch: every modality type maps to its bottom function.
  bottoms = {
      ModalityType.AUDIO: audio_bottom,
      ModalityType.AUDIO_SPECTRAL: audio_spectral_bottom,
      ModalityType.CLASS_LABEL: class_label_bottom,
      ModalityType.MULTI_LABEL: class_label_bottom,
      ModalityType.ONE_HOT_CLASS_LABEL: class_label_bottom,
      ModalityType.SIGMOID_CLASS_LABEL: class_label_bottom,
      ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL: class_label_bottom,
      ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL: class_label_bottom,
      ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL: class_label_bottom,
      ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL: class_label_bottom,
      ModalityType.CTC_SYMBOL: symbol_bottom,
      ModalityType.SYMBOL: symbol_bottom,
      ModalityType.SYMBOL_WEIGHTS_ALL: symbol_bottom,
      ModalityType.GENERIC_L2_LOSS: identity_bottom,
      ModalityType.IDENTITY: identity_bottom,
      ModalityType.IDENTITY_SYMBOL: identity_bottom,
      ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM: identity_bottom,
      ModalityType.IMAGE: image_bottom,
      ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY: image_channel_compress_bottom,
      ModalityType.IMAGE_CHANNEL_COMPRESS: image_channel_compress_bottom,
      ModalityType.REAL: real_bottom,
      ModalityType.REAL_L2_LOSS: real_bottom,
      ModalityType.REAL_LOG_POISSON_LOSS: real_bottom,
      ModalityType.SPEECH_RECOGNITION: speech_recognition_bottom,
      ModalityType.SYMBOL_ONE_HOT: symbol_one_hot_bottom,
      ModalityType.VIDEO: video_bottom,
      ModalityType.VIDEO_L1: video_bottom,
      ModalityType.VIDEO_L2: video_bottom,
      ModalityType.VIDEO_BITWISE: video_bitwise_bottom,
      ModalityType.VIDEO_IDENTITY: video_identity_bottom,
      ModalityType.VIDEO_L1_RAW: video_raw_bottom,
      ModalityType.VIDEO_L2_RAW: video_raw_bottom,
      ModalityType.VIDEO_PIXEL_NOISE: video_pixel_noise_bottom,
  }
  return bottoms.get(modality_type, value)
def get_loss(modality_type, value=None):
  """Gets default loss transformation; if none available, return value."""
  # Table-driven dispatch: every modality type maps to its loss function.
  losses = {
      ModalityType.AUDIO: generic_loss,
      ModalityType.AUDIO_SPECTRAL: generic_loss,
      ModalityType.CLASS_LABEL: generic_loss,
      ModalityType.IDENTITY: generic_loss,
      ModalityType.IDENTITY_SYMBOL: generic_loss,
      ModalityType.IMAGE: generic_loss,
      ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY: generic_loss,
      ModalityType.IMAGE_CHANNEL_COMPRESS: generic_loss,
      ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM: generic_loss,
      ModalityType.REAL: generic_loss,
      ModalityType.SPEECH_RECOGNITION: generic_loss,
      ModalityType.SYMBOL: generic_loss,
      ModalityType.SYMBOL_WEIGHTS_ALL: generic_loss,
      ModalityType.CTC_SYMBOL: ctc_symbol_loss,
      ModalityType.GENERIC_L2_LOSS: generic_l2_loss,
      ModalityType.MULTI_LABEL: multi_label_loss,
      ModalityType.ONE_HOT_CLASS_LABEL: one_hot_class_label_loss,
      ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL:
          one_hot_class_label_loss,
      ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL:
          one_hot_class_label_loss,
      ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL: one_hot_class_label_loss,
      ModalityType.REAL_L2_LOSS: real_l2_loss,
      ModalityType.REAL_LOG_POISSON_LOSS: real_log_poisson_loss,
      ModalityType.SIGMOID_CLASS_LABEL: sigmoid_class_label_loss,
      ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL:
          sigmoid_max_pooling_class_label_loss,
      ModalityType.SYMBOL_ONE_HOT: symbol_one_hot_loss,
      ModalityType.VIDEO: video_loss,
      ModalityType.VIDEO_BITWISE: video_loss,
      ModalityType.VIDEO_PIXEL_NOISE: video_loss,
      ModalityType.VIDEO_IDENTITY: video_identity_loss,
      ModalityType.VIDEO_L1: video_l1_loss,
      ModalityType.VIDEO_L1_RAW: video_l1_raw_loss,
      ModalityType.VIDEO_L2: video_l2_loss,
      ModalityType.VIDEO_L2_RAW: video_l2_raw_loss,
  }
  return losses.get(modality_type, value)
def get_name(modality_type, value=None):
  """Gets default name for transformations; if none available, return value."""
  # For legacy reasons, modalities vary in their naming scheme. Future plans are
  # to remove any need for get_name. We do not recommend using it.
  # Fixed names that ignore both the hparams and the vocab size.
  static_names = {
      ModalityType.AUDIO: "audio_modality",
      ModalityType.AUDIO_SPECTRAL: "audio_spectral_modality",
      ModalityType.GENERIC_L2_LOSS: "generic_l2_loss_modality",
      ModalityType.IDENTITY: "identity_modality",
      ModalityType.IMAGE: "image_modality",
      ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY:
          "image_channel_bottom_identity_modality",
      ModalityType.IMAGE_CHANNEL_COMPRESS: "image_channel_compress_modality",
      ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM:
          "image_channel_embeddings_bottom",
      ModalityType.REAL: "real_modality",
      ModalityType.REAL_L2_LOSS: "real_l2_loss_modality",
      ModalityType.REAL_LOG_POISSON_LOSS: "real_log_poisson_loss_modality",
      ModalityType.SPEECH_RECOGNITION: "speech_recognition_modality",
      ModalityType.VIDEO: "video_modality",
      ModalityType.VIDEO_BITWISE: "video_modality_bitwise",
      ModalityType.VIDEO_IDENTITY: "video_modality_identity",
      ModalityType.VIDEO_L1: "video_modality_l1",
      ModalityType.VIDEO_L1_RAW: "video_modality_l1_raw",
      ModalityType.VIDEO_L2: "video_modality_l2",
      ModalityType.VIDEO_L2_RAW: "video_modality_l2_raw",
      ModalityType.VIDEO_PIXEL_NOISE: "video_modality_pixel_noise",
  }
  if modality_type in static_names:
    fixed = static_names[modality_type]
    return lambda model_hparams, vocab_size: fixed
  # Names parameterized as template % (vocab_size, hidden_size).
  templates = {
      ModalityType.CLASS_LABEL: "class_label_modality_%d_%d",
      ModalityType.MULTI_LABEL: "class_label_modality_%d_%d",
      ModalityType.ONE_HOT_CLASS_LABEL: "class_label_modality_%d_%d",
      ModalityType.CTC_SYMBOL: "symbol_modality_%d_%d",
      ModalityType.IDENTITY_SYMBOL: "symbol_modality_%d_%d",
      ModalityType.SYMBOL: "symbol_modality_%d_%d",
      ModalityType.SYMBOL_WEIGHTS_ALL: "symbol_modality_%d_%d",
      ModalityType.SYMBOL_ONE_HOT: "symbol_modality_%d_%d",
      ModalityType.SIGMOID_CLASS_LABEL: "sigmoid_class_symbol_modality_%d_%d",
      ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL:
          "sigmoid_max_pooling_class_symbol_modality_%d_%d",
      ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL:
          "softmax_average_pooling_onehot_class_label_modality_%d_%d",
      ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL:
          "softmax_last_timestep_onehot_class_label_modality_%d_%d",
      ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL:
          "softmax_max_pooling_onehot_class_label_modality_%d_%d",
  }
  if modality_type in templates:
    template = templates[modality_type]
    def name(model_hparams, vocab_size):
      return template % (vocab_size, model_hparams.hidden_size)
    return name
  return value
def get_targets_bottom(modality_type, value=None):
  """Gets default bottom transformation for targets; if none, return value."""
  # Table-driven dispatch: every modality type maps to its targets-bottom.
  targets_bottoms = {
      ModalityType.AUDIO: make_targets_bottom(audio_bottom),
      ModalityType.AUDIO_SPECTRAL: make_targets_bottom(audio_spectral_bottom),
      ModalityType.CLASS_LABEL: class_label_targets_bottom,
      ModalityType.MULTI_LABEL: class_label_targets_bottom,
      ModalityType.ONE_HOT_CLASS_LABEL: class_label_targets_bottom,
      ModalityType.SIGMOID_CLASS_LABEL: class_label_targets_bottom,
      ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL: class_label_targets_bottom,
      ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL:
          class_label_targets_bottom,
      ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL:
          class_label_targets_bottom,
      ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL: class_label_targets_bottom,
      ModalityType.CTC_SYMBOL: symbol_targets_bottom,
      ModalityType.SYMBOL: symbol_targets_bottom,
      ModalityType.SYMBOL_WEIGHTS_ALL: symbol_targets_bottom,
      ModalityType.GENERIC_L2_LOSS: identity_bottom,
      ModalityType.IDENTITY_SYMBOL: identity_bottom,
      ModalityType.IDENTITY: make_targets_bottom(identity_bottom),
      ModalityType.IMAGE: image_targets_bottom,
      ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY:
          image_channel_compress_targets_bottom,
      ModalityType.IMAGE_CHANNEL_COMPRESS:
          image_channel_compress_targets_bottom,
      ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM:
          image_channel_embeddings_bottom,
      ModalityType.REAL: make_targets_bottom(real_bottom),
      ModalityType.REAL_L2_LOSS: make_targets_bottom(real_bottom),
      ModalityType.REAL_LOG_POISSON_LOSS: make_targets_bottom(real_bottom),
      ModalityType.SPEECH_RECOGNITION:
          make_targets_bottom(speech_recognition_bottom),
      ModalityType.SYMBOL_ONE_HOT: symbol_one_hot_bottom,
      ModalityType.VIDEO: video_targets_bottom,
      ModalityType.VIDEO_L1: video_targets_bottom,
      ModalityType.VIDEO_L2: video_targets_bottom,
      ModalityType.VIDEO_BITWISE: video_bitwise_targets_bottom,
      ModalityType.VIDEO_IDENTITY: video_identity_targets_bottom,
      ModalityType.VIDEO_L1_RAW: video_raw_targets_bottom,
      ModalityType.VIDEO_L2_RAW: video_raw_targets_bottom,
      ModalityType.VIDEO_PIXEL_NOISE:
          make_targets_bottom(video_pixel_noise_bottom),
  }
  return targets_bottoms.get(modality_type, value)
def get_top(modality_type, value=None):
  """Gets default top transformation; if none available, return value."""
  # Table-driven dispatch: every modality type maps to its top function.
  tops = {
      ModalityType.AUDIO: identity_top,
      ModalityType.AUDIO_SPECTRAL: identity_top,
      ModalityType.GENERIC_L2_LOSS: identity_top,
      ModalityType.IDENTITY: identity_top,
      ModalityType.IDENTITY_SYMBOL: identity_top,
      ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY: identity_top,
      ModalityType.SPEECH_RECOGNITION: identity_top,
      ModalityType.VIDEO_IDENTITY: identity_top,
      ModalityType.CLASS_LABEL: class_label_top,
      ModalityType.MULTI_LABEL: class_label_top,
      ModalityType.ONE_HOT_CLASS_LABEL: class_label_top,
      ModalityType.SIGMOID_CLASS_LABEL: class_label_top,
      ModalityType.CTC_SYMBOL: symbol_top,
      ModalityType.SYMBOL: symbol_top,
      ModalityType.SYMBOL_WEIGHTS_ALL: symbol_top,
      ModalityType.IMAGE: image_top,
      ModalityType.IMAGE_CHANNEL_COMPRESS: image_channel_compress_top,
      ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM:
          image_channel_embeddings_top,
      ModalityType.REAL: real_top,
      ModalityType.REAL_L2_LOSS: real_top,
      ModalityType.REAL_LOG_POISSON_LOSS: real_top,
      ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL:
          sigmoid_max_pooling_class_label_top,
      ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL:
          softmax_average_pooling_class_label_top,
      ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL:
          softmax_last_timestep_class_label_top,
      ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL:
          softmax_max_pooling_class_label_top,
      ModalityType.SYMBOL_ONE_HOT: symbol_one_hot_top,
      ModalityType.VIDEO: video_top,
      ModalityType.VIDEO_BITWISE: video_top,
      ModalityType.VIDEO_PIXEL_NOISE: video_top,
      ModalityType.VIDEO_L1: video_l1_top,
      ModalityType.VIDEO_L2: video_l1_top,
      ModalityType.VIDEO_L1_RAW: video_raw_top,
      ModalityType.VIDEO_L2_RAW: video_raw_top,
  }
  return tops.get(modality_type, value)
def get_weights_fn(modality_type, value=None):
  """Gets default weights function; if none available, return value."""
  # Symbol-like modalities mask out padding (zero) targets.
  nonzero_types = (ModalityType.CTC_SYMBOL,
                   ModalityType.IDENTITY_SYMBOL,
                   ModalityType.MULTI_LABEL,
                   ModalityType.SYMBOL,
                   ModalityType.SYMBOL_ONE_HOT)
  if modality_type in nonzero_types:
    return common_layers.weights_nonzero
  # Every other known modality weights all positions equally.
  if modality_type in ModalityType.get_choices():
    return common_layers.weights_all
  return value
| 59,324 | 38.39243 | 80 | py |
sequer | sequer-main/code/tensor2tensor/tensor2tensor/rl/batch_dqn_agent_test.py | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for BatchDQNAgent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from absl import flags
from dopamine.agents.dqn import dqn_agent
from dopamine.discrete_domains import atari_lib
import numpy as np
from tensor2tensor.rl import dopamine_connector
import tensorflow as tf
FLAGS = flags.FLAGS
class BatchDQNAgentTest(tf.test.TestCase):
  """Tests for dopamine_connector.BatchDQNAgent (DQN over a batch of envs)."""

  # TODO(kozak): add testStepTrain (and possibly other tests) from dopamine
  # dqn_agent_test.py

  def setUp(self):
    """Creates a scratch checkpoint dir and the shared agent hyperparameters."""
    super(BatchDQNAgentTest, self).setUp()
    self._test_subdir = os.path.join('/tmp/dopamine_tests', 'ckpts')
    # Recreate the checkpoint directory from scratch for every test.
    shutil.rmtree(self._test_subdir, ignore_errors=True)
    os.makedirs(self._test_subdir)
    self.num_actions = 4
    self.min_replay_history = 6
    self.update_period = 2
    self.target_update_period = 4
    self.epsilon_decay_period = 90
    self.epsilon_train = 0.05
    self.observation_shape = dqn_agent.NATURE_DQN_OBSERVATION_SHAPE
    self.stack_size = dqn_agent.NATURE_DQN_STACK_SIZE
    self.env_batch_size = 4
    # All-zeros frame stack for a whole batch of environments; reused by
    # tests as the expected "reset" state.
    self.zero_state = np.zeros(
        [self.env_batch_size, self.observation_shape[0],
         self.observation_shape[1], self.stack_size])

  def _create_test_agent(self, sess):
    """Builds a BatchDQNAgent around a deterministic mock Q-network."""
    stack_size = self.stack_size

    class MockDQNNetwork(tf.keras.Model):
      """The Keras network used in tests."""

      def __init__(self, num_actions, **kwargs):
        # This weights_initializer gives action 0 a higher weight, ensuring
        # that it gets picked by the argmax.
        super(MockDQNNetwork, self).__init__(**kwargs)
        weights_initializer = np.tile(
            np.arange(num_actions, 0, -1), (stack_size, 1))
        self.layer = tf.keras.layers.Dense(
            num_actions,
            kernel_initializer=tf.constant_initializer(weights_initializer),
            bias_initializer=tf.ones_initializer())

      def call(self, state):
        # The observation content is ignored: zeros are fed through the
        # dense layer, so Q-values depend only on the constant weights.
        inputs = tf.constant(
            np.zeros((state.shape[0], stack_size)), dtype=tf.float32)
        return atari_lib.DQNNetworkType(self.layer((inputs)))

    agent = dopamine_connector.BatchDQNAgent(
        network=MockDQNNetwork,
        replay_capacity=100,
        buffer_batch_size=8,
        generates_trainable_dones=True,
        sess=sess,
        env_batch_size=self.env_batch_size,
        num_actions=self.num_actions,
        min_replay_history=self.min_replay_history,
        epsilon_fn=lambda w, x, y, z: 0.0,  # No exploration.
        update_period=self.update_period,
        target_update_period=self.target_update_period,
        epsilon_eval=0.0)  # No exploration during evaluation.
    # This ensures non-random action choices (since epsilon_eval = 0.0) and
    # skips the train_step.
    agent.eval_mode = True
    sess.run(tf.global_variables_initializer())
    return agent

  def testCreateAgentWithDefaults(self):
    # Verifies that we can create and train an agent with the default values.
    with tf.Session() as sess:
      agent = self._create_test_agent(sess)
      sess.run(tf.global_variables_initializer())
      observation = np.ones([84, 84, 1])
      # Drive a minimal episode (batch of one env): begin, one step, end.
      agent.begin_episode([observation])
      agent.step(reward=[1], observation=[observation])
      agent.end_episode(reward=[1])

  def testBeginEpisode(self):
    """Test the functionality of agent.begin_episode.

    Specifically, the action returned and its effect on state.
    """
    with tf.Session() as sess:
      agent = self._create_test_agent(sess)
      # We fill up the state with 9s. On calling agent.begin_episode the state
      # should be reset to all 0s.
      agent.state_batch.fill(9)
      first_observation = np.ones(
          [self.env_batch_size, self.observation_shape[0],
           self.observation_shape[1], 1])
      self.assertTrue((agent.begin_episode(first_observation) == 0).all())
      # When the all-1s observation is received, it will be placed at the end of
      # the state.
      expected_state = self.zero_state
      expected_state[:, :, :, -1] = np.ones(
          [self.env_batch_size, self.observation_shape[0],
           self.observation_shape[1]])
      self.assertAllEqual(agent.state_batch, expected_state)
      self.assertAllEqual(agent._observation_batch, first_observation[..., 0])
      # No training happens in eval mode.
      self.assertEqual(agent.training_steps, 0)
      # This will now cause training to happen.
      agent.eval_mode = False
      # Having a low replay memory add_count will prevent any of the
      # train/prefetch/sync ops from being called.
      agent._replay.memory.add_count = 0
      second_observation = np.ones(
          [self.env_batch_size, self.observation_shape[0],
           self.observation_shape[1], 1]) * 2
      agent.begin_episode(second_observation)
      # The agent's state will be reset, so we will only be left with the all-2s
      # observation.
      expected_state[:, :, :, -1] = np.full(
          (self.env_batch_size, self.observation_shape[0],
           self.observation_shape[1]), 2
      )
      self.assertAllEqual(agent.state_batch, expected_state)
      self.assertAllEqual(agent._observation_batch,
                          second_observation[:, :, :, 0])
      # training_steps is incremented since we set eval_mode to False.
      self.assertEqual(agent.training_steps, 1)
# Run the test suite when this file is executed as a script.
if __name__ == '__main__':
  tf.test.main()
| 6,005 | 36.773585 | 80 | py |
nlp-architect | nlp-architect-master/setup.py | #!/usr/bin/env python
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import io
import os
from setuptools import find_packages, setup
# Directory containing this setup.py; used to locate package files.
root = os.path.abspath(os.path.dirname(__file__))

# required packages for NLP Architect
with open("requirements.txt") as fp:
    # Keep every line without a '#' comment marker. Lines retain their
    # trailing newlines, matching the original parsing behavior.
    reqs = [line for line in fp.readlines() if "#" not in line]
install_requirements = reqs

# Optional dependency groups exposed via extras_require below.
everything = [
    "tensorflow_hub",
    "elasticsearch",
    "wordfreq",
    "newspaper3k",
    "pywikibot",
    "num2words",
    "bokeh",
    "pandas",
    "hyperopt",
    "termcolor",
]
dev = [
    "sphinx==1.8.5",
    "sphinx_rtd_theme",
    "flake8-html",
    "black",
    "pep8",
    "flake8",
    "pytest",
    "pytest-cov",
    "pytest-mock",
    "pytest-xdist",
    "pylint",
]
extras = {"all": everything, "dev": dev}
# read official README.md
with open("README.md", encoding="utf8") as fp:
    long_desc = fp.read()

# Extract NLP_ARCHITECT_VERSION from version.py by exec'ing it into a dict,
# which avoids importing the package (and its dependencies) at build time.
with io.open(os.path.join(root, "nlp_architect", "version.py"), encoding="utf8") as f:
    version_f = {}
    exec(f.read(), version_f)  # pylint: disable=exec-used
    version = version_f["NLP_ARCHITECT_VERSION"]

setup(
    name="nlp-architect",
    version=version,
    description="Intel AI Lab NLP and NLU research model library",
    long_description=long_desc,
    long_description_content_type="text/markdown",
    keywords="NLP NLU deep learning natural language processing tensorflow pytorch",
    author="Intel AI Lab",
    author_email="nlp_architect@intel.com",
    url="https://github.com/IntelLabs/nlp-architect",
    license="Apache 2.0",
    python_requires=">=3.6.*",
    # Ship only the library packages; tests, server, examples and solutions
    # are excluded from the wheel.
    packages=find_packages(
        exclude=["tests.*", "tests", "server.*", "server", "examples.*", "examples", "solutions.*", "solutions"]
    ),
    install_requires=install_requirements,
    extras_require=extras,
    scripts=["nlp_architect/nlp-train", "nlp_architect/nlp-inference"],
    include_package_data=True,
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: End Users/Desktop",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Topic :: Scientific/Engineering",
        "Topic :: Scientific/Engineering :: " + "Artificial Intelligence",
        "Topic :: Software Development :: Libraries",
        "Topic :: Software Development :: Libraries :: " + "Python Modules",
        "Topic :: Scientific/Engineering :: Information Analysis",
        "Environment :: Console",
        "Environment :: Web Environment",
        "Operating System :: POSIX",
        "Operating System :: MacOS :: MacOS X",
    ],
)
| 3,486 | 31.287037 | 112 | py |
nlp-architect | nlp-architect-master/examples/sparse_gnmt/gnmt/model_helper.py | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
# Changes Made from original:
# import paths
# quantization operations
# pruning operations
# ******************************************************************************
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: skip-file
"""Utility functions for building models."""
from __future__ import print_function
import collections
import os
import time
import numpy as np
import six
import tensorflow as tf
from tensorflow.python.ops import lookup_ops
from .utils import misc_utils as utils, vocab_utils, iterator_utils
# Public API of this module.
__all__ = [
    "get_initializer",
    "get_device_str",
    "create_train_model",
    "create_eval_model",
    "create_infer_model",
    "create_emb_for_encoder_and_decoder",
    "create_rnn_cell",
    "gradient_clip",
    "create_or_load_model",
    "load_model",
    "avg_checkpoints",
    "compute_perplexity",
]

# If a vocab size is greater than this value, put the embedding on cpu instead
VOCAB_SIZE_THRESHOLD_CPU = 50000

# Collection for all the tensors involved in the quantization process
# NOTE(review): "qunatization" looks like a typo for "quantization", but the
# string may be baked into saved graphs/checkpoints — confirm before renaming.
_QUANTIZATION_COLLECTION = "qunatization"
def get_initializer(init_op, seed=None, init_weight=None):
  """Create an initializer. init_weight is only for uniform.

  Args:
    init_op: One of "uniform", "glorot_normal", "glorot_uniform".
    seed: Optional random seed for the initializer.
    init_weight: Half-width of the uniform range; required when
      init_op == "uniform".

  Returns:
    A TensorFlow initializer object.

  Raises:
    ValueError: If init_op is unknown, or "uniform" is requested without a
      (truthy) init_weight.
  """
  if init_op == "uniform":
    # Explicit error instead of the old bare `assert`: asserts are stripped
    # under `python -O`, and this gives a useful message.
    if not init_weight:
      raise ValueError("init_weight must be provided for uniform init_op")
    return tf.random_uniform_initializer(-init_weight, init_weight, seed=seed)
  elif init_op == "glorot_normal":
    return tf.keras.initializers.glorot_normal(seed=seed)
  elif init_op == "glorot_uniform":
    return tf.keras.initializers.glorot_uniform(seed=seed)
  else:
    raise ValueError("Unknown init_op %s" % init_op)
def get_device_str(device_id, num_gpus):
    """Round-robin a logical device id over the available GPUs (CPU if none)."""
    if num_gpus == 0:
        return "/cpu:0"
    return "/gpu:%d" % (device_id % num_gpus)
class ExtraArgs(
    collections.namedtuple(
        "ExtraArgs",
        (
            "single_cell_fn",
            "model_device_fn",
            "attention_mechanism_fn",
            "encoder_emb_lookup_fn",
        ),
    )
):
    """Optional hooks customizing cell, device, attention and embedding creation."""
class TrainModel(
    collections.namedtuple(
        "TrainModel",
        ("graph", "model", "iterator", "skip_count_placeholder"),
    )
):
    """Immutable bundle of a training graph and its session-time handles."""
def create_train_model(model_creator, hparams, scope=None, num_workers=1, jobid=0, extra_args=None):
    """Create train graph, model, and iterator.

    Args:
        model_creator: callable that builds the model object; invoked in
            TRAIN mode inside the new graph.
        hparams: hyper-parameter namespace supplying file prefixes, vocab
            files and iterator settings.
        scope: optional container/variable-scope name (defaults to "train").
        num_workers: number of data shards for distributed training.
        jobid: index of this worker's shard.
        extra_args: optional ExtraArgs with customization hooks.

    Returns:
        TrainModel namedtuple holding the fresh graph, model, iterator and the
        skip-count placeholder used to fast-forward the input pipeline.
    """
    src_file = "%s.%s" % (hparams.train_prefix, hparams.src)
    tgt_file = "%s.%s" % (hparams.train_prefix, hparams.tgt)
    src_vocab_file = hparams.src_vocab_file
    tgt_vocab_file = hparams.tgt_vocab_file
    graph = tf.Graph()
    with graph.as_default(), tf.container(scope or "train"):
        src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
            src_vocab_file, tgt_vocab_file, hparams.share_vocab
        )
        # Training files may be sharded on disk, hence the glob.
        src_dataset = tf.data.TextLineDataset(tf.gfile.Glob(src_file))
        tgt_dataset = tf.data.TextLineDataset(tf.gfile.Glob(tgt_file))
        # Number of examples to skip at session time (supports resuming mid-epoch).
        skip_count_placeholder = tf.placeholder(shape=(), dtype=tf.int64)
        iterator = iterator_utils.get_iterator(
            src_dataset,
            tgt_dataset,
            src_vocab_table,
            tgt_vocab_table,
            batch_size=hparams.batch_size,
            sos=hparams.sos,
            eos=hparams.eos,
            random_seed=hparams.random_seed,
            num_buckets=hparams.num_buckets,
            src_max_len=hparams.src_max_len,
            tgt_max_len=hparams.tgt_max_len,
            skip_count=skip_count_placeholder,
            num_shards=num_workers,
            shard_index=jobid,
            use_char_encode=hparams.use_char_encode,
        )
        # Note: One can set model_device_fn to
        # `tf.train.replica_device_setter(ps_tasks)` for distributed training.
        model_device_fn = None
        if extra_args:
            model_device_fn = extra_args.model_device_fn
        with tf.device(model_device_fn):
            model = model_creator(
                hparams,
                iterator=iterator,
                mode=tf.contrib.learn.ModeKeys.TRAIN,
                source_vocab_table=src_vocab_table,
                target_vocab_table=tgt_vocab_table,
                scope=scope,
                extra_args=extra_args,
            )
    return TrainModel(
        graph=graph, model=model, iterator=iterator, skip_count_placeholder=skip_count_placeholder
    )
class EvalModel(
    collections.namedtuple(
        "EvalModel",
        (
            "graph",
            "model",
            "src_file_placeholder",
            "tgt_file_placeholder",
            "iterator",
        ),
    )
):
    """Evaluation graph bundle plus the file-path feeds for its iterator."""
def create_eval_model(model_creator, hparams, scope=None, extra_args=None):
    """Create train graph, model, src/tgt file holders, and iterator.

    The source/target file paths are fed at session time through the returned
    placeholders, so one eval graph can score multiple file pairs.

    Returns:
        EvalModel namedtuple with the graph, EVAL-mode model, the two
        file-path placeholders, and the data iterator.
    """
    src_vocab_file = hparams.src_vocab_file
    tgt_vocab_file = hparams.tgt_vocab_file
    graph = tf.Graph()
    with graph.as_default(), tf.container(scope or "eval"):
        src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
            src_vocab_file, tgt_vocab_file, hparams.share_vocab
        )
        # id -> token table, used to turn decoder output ids back into text.
        reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(
            tgt_vocab_file, default_value=vocab_utils.UNK
        )
        src_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)
        tgt_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)
        src_dataset = tf.data.TextLineDataset(src_file_placeholder)
        tgt_dataset = tf.data.TextLineDataset(tgt_file_placeholder)
        # Note: eval reuses the *_infer max-length limits, not the training ones.
        iterator = iterator_utils.get_iterator(
            src_dataset,
            tgt_dataset,
            src_vocab_table,
            tgt_vocab_table,
            hparams.batch_size,
            sos=hparams.sos,
            eos=hparams.eos,
            random_seed=hparams.random_seed,
            num_buckets=hparams.num_buckets,
            src_max_len=hparams.src_max_len_infer,
            tgt_max_len=hparams.tgt_max_len_infer,
            use_char_encode=hparams.use_char_encode,
        )
        model = model_creator(
            hparams,
            iterator=iterator,
            mode=tf.contrib.learn.ModeKeys.EVAL,
            source_vocab_table=src_vocab_table,
            target_vocab_table=tgt_vocab_table,
            reverse_target_vocab_table=reverse_tgt_vocab_table,
            scope=scope,
            extra_args=extra_args,
        )
    return EvalModel(
        graph=graph,
        model=model,
        src_file_placeholder=src_file_placeholder,
        tgt_file_placeholder=tgt_file_placeholder,
        iterator=iterator,
    )
class InferModel(
    collections.namedtuple(
        "InferModel",
        (
            "graph",
            "model",
            "src_placeholder",
            "batch_size_placeholder",
            "iterator",
        ),
    )
):
    """Inference graph bundle plus the in-memory source feed and batch-size feed."""
def create_infer_model(model_creator, hparams, scope=None, extra_args=None):
    """Create inference model.

    Unlike train/eval, inference consumes an in-memory list of source
    sentences (fed through ``src_placeholder``) rather than files, and the
    batch size is also fed at session time.

    Returns:
        InferModel namedtuple with the graph, INFER-mode model, the source and
        batch-size placeholders, and the data iterator.
    """
    graph = tf.Graph()
    src_vocab_file = hparams.src_vocab_file
    tgt_vocab_file = hparams.tgt_vocab_file
    with graph.as_default(), tf.container(scope or "infer"):
        src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
            src_vocab_file, tgt_vocab_file, hparams.share_vocab
        )
        # id -> token table for converting predicted ids back into text.
        reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(
            tgt_vocab_file, default_value=vocab_utils.UNK
        )
        src_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
        batch_size_placeholder = tf.placeholder(shape=[], dtype=tf.int64)
        src_dataset = tf.data.Dataset.from_tensor_slices(src_placeholder)
        # No target side at inference time, hence the dedicated infer iterator.
        iterator = iterator_utils.get_infer_iterator(
            src_dataset,
            src_vocab_table,
            batch_size=batch_size_placeholder,
            eos=hparams.eos,
            src_max_len=hparams.src_max_len_infer,
            use_char_encode=hparams.use_char_encode,
        )
        model = model_creator(
            hparams,
            iterator=iterator,
            mode=tf.contrib.learn.ModeKeys.INFER,
            source_vocab_table=src_vocab_table,
            target_vocab_table=tgt_vocab_table,
            reverse_target_vocab_table=reverse_tgt_vocab_table,
            scope=scope,
            extra_args=extra_args,
        )
    return InferModel(
        graph=graph,
        model=model,
        src_placeholder=src_placeholder,
        batch_size_placeholder=batch_size_placeholder,
        iterator=iterator,
    )
def _get_embed_device(vocab_size):
    """Place large embedding matrices on CPU, small ones on GPU."""
    return "/cpu:0" if vocab_size > VOCAB_SIZE_THRESHOLD_CPU else "/gpu:0"
def _create_pretrained_emb_from_txt(
    vocab_file, embed_file, num_trainable_tokens=3, dtype=tf.float32, scope=None
):
    """Load pretrain embeding from embed_file, and return an embedding matrix.

    Args:
        vocab_file: path to the vocabulary file; its token order defines the
            row order of the returned matrix.
        embed_file: Path to a Glove formated embedding txt file.
        num_trainable_tokens: Make the first n tokens in the vocab file as trainable
            variables. Default is 3, which is "<unk>", "<s>" and "</s>".
        dtype: dtype of the embedding matrix.
        scope: variable scope for the trainable slice.

    Returns:
        A [vocab_size, emb_size] tensor: trainable rows for the first
        ``num_trainable_tokens`` tokens, constant rows for the rest.
    """
    vocab, _ = vocab_utils.load_vocab(vocab_file)
    trainable_tokens = vocab[:num_trainable_tokens]
    utils.print_out("# Using pretrained embedding: %s." % embed_file)
    utils.print_out("  with trainable tokens: ")
    emb_dict, emb_size = vocab_utils.load_embed_txt(embed_file)
    for token in trainable_tokens:
        utils.print_out("    %s" % token)
        if token not in emb_dict:
            # Tokens missing from the pretrained file start at zero.
            emb_dict[token] = [0.0] * emb_size
    emb_mat = np.array([emb_dict[token] for token in vocab], dtype=dtype.as_numpy_dtype())
    emb_mat = tf.constant(emb_mat)
    # Constant (non-trainable) part: everything after the trainable prefix.
    emb_mat_const = tf.slice(emb_mat, [num_trainable_tokens, 0], [-1, -1])
    with tf.variable_scope(scope or "pretrain_embeddings", dtype=dtype) as scope:
        with tf.device(_get_embed_device(num_trainable_tokens)):
            emb_mat_var = tf.get_variable("emb_mat_var", [num_trainable_tokens, emb_size])
    return tf.concat([emb_mat_var, emb_mat_const], 0)
def _create_or_load_embed(
    embed_name, vocab_file, embed_file, vocab_size, embed_size, dtype, embed_type="dense"
):
    """Create a fresh embedding variable, or load a pretrained one from text."""
    if vocab_file and embed_file:
        # Pretrained path: the matrix comes from disk; embed_name/embed_type
        # are not consulted.
        return _create_pretrained_emb_from_txt(vocab_file, embed_file)
    with tf.device(_get_embed_device(vocab_size)):
        if embed_type not in ("dense", "sparse"):
            raise ValueError("Unknown embedding type %s!" % embed_type)
        embedding = tf.get_variable(embed_name, [vocab_size, embed_size], dtype)
        if embed_type == "sparse":
            # Wrap with a pruning mask so the matrix can be sparsified.
            embedding = tf.contrib.model_pruning.apply_mask(embedding, embed_name)
    return embedding
def create_emb_for_encoder_and_decoder(
    share_vocab,
    src_vocab_size,
    tgt_vocab_size,
    src_embed_size,
    tgt_embed_size,
    embed_type="dense",
    dtype=tf.float32,
    num_enc_partitions=0,
    num_dec_partitions=0,
    src_vocab_file=None,
    tgt_vocab_file=None,
    src_embed_file=None,
    tgt_embed_file=None,
    use_char_encode=False,
    scope=None,
):
    """Create embedding matrix for both encoder and decoder.
    Args:
      share_vocab: A boolean. Whether to share embedding matrix for both
        encoder and decoder.
      src_vocab_size: An integer. The source vocab size.
      tgt_vocab_size: An integer. The target vocab size.
      src_embed_size: An integer. The embedding dimension for the encoder's
        embedding.
      tgt_embed_size: An integer. The embedding dimension for the decoder's
        embedding.
      embed_type: "dense" or "sparse" (sparse adds a pruning mask).
      dtype: dtype of the embedding matrix. Default to float32.
      num_enc_partitions: number of partitions used for the encoder's embedding
        vars.
      num_dec_partitions: number of partitions used for the decoder's embedding
        vars.
      src_vocab_file/tgt_vocab_file: vocab files; with the matching embed file
        they trigger loading a pretrained matrix instead of a fresh variable.
      src_embed_file/tgt_embed_file: optional Glove-format pretrained files.
      use_char_encode: when True, no encoder embedding is built (returns None).
      scope: VariableScope for the created subgraph. Default to "embedding".
    Returns:
      embedding_encoder: Encoder's embedding matrix.
      embedding_decoder: Decoder's embedding matrix.
    Raises:
      ValueError: if use share_vocab but source and target have different vocab
        size.
    """
    if num_enc_partitions <= 1:
        enc_partitioner = None
    else:
        # Note: num_partitions > 1 is required for distributed training due to
        # embedding_lookup tries to colocate single partition-ed embedding variable
        # with lookup ops. This may cause embedding variables being placed on worker
        # jobs.
        enc_partitioner = tf.fixed_size_partitioner(num_enc_partitions)
    if num_dec_partitions <= 1:
        dec_partitioner = None
    else:
        # Note: num_partitions > 1 is required for distributed training due to
        # embedding_lookup tries to colocate single partition-ed embedding variable
        # with lookup ops. This may cause embedding variables being placed on worker
        # jobs.
        dec_partitioner = tf.fixed_size_partitioner(num_dec_partitions)
    # Pretrained matrices are built as constants + a small variable, which
    # cannot be partitioned.
    if src_embed_file and enc_partitioner:
        raise ValueError(
            "Can't set num_enc_partitions > 1 when using pretrained encoder " "embedding"
        )
    if tgt_embed_file and dec_partitioner:
        # NOTE(review): "decdoer" typo in this message is preserved verbatim.
        raise ValueError(
            "Can't set num_dec_partitions > 1 when using pretrained decdoer " "embedding"
        )
    with tf.variable_scope(scope or "embeddings", dtype=dtype, partitioner=enc_partitioner):
        # Share embedding
        if share_vocab:
            if src_vocab_size != tgt_vocab_size:
                raise ValueError(
                    "Share embedding but different src/tgt vocab sizes"
                    " %d vs. %d" % (src_vocab_size, tgt_vocab_size)
                )
            assert src_embed_size == tgt_embed_size
            utils.print_out("# Use the same embedding for source and target")
            vocab_file = src_vocab_file or tgt_vocab_file
            embed_file = src_embed_file or tgt_embed_file
            embedding_encoder = _create_or_load_embed(
                "embedding_share",
                vocab_file,
                embed_file,
                src_vocab_size,
                src_embed_size,
                dtype,
                embed_type=embed_type,
            )
            embedding_decoder = embedding_encoder
        else:
            if not use_char_encode:
                with tf.variable_scope("encoder", partitioner=enc_partitioner):
                    embedding_encoder = _create_or_load_embed(
                        "embedding_encoder",
                        src_vocab_file,
                        src_embed_file,
                        src_vocab_size,
                        src_embed_size,
                        dtype,
                        embed_type=embed_type,
                    )
            else:
                # Char-level encoding builds its own representation downstream.
                embedding_encoder = None
            with tf.variable_scope("decoder", partitioner=dec_partitioner):
                embedding_decoder = _create_or_load_embed(
                    "embedding_decoder",
                    tgt_vocab_file,
                    tgt_embed_file,
                    tgt_vocab_size,
                    tgt_embed_size,
                    dtype,
                    embed_type=embed_type,
                )
    return embedding_encoder, embedding_decoder
def _single_cell(
    unit_type,
    num_units,
    forget_bias,
    dropout,
    mode,
    residual_connection=False,
    device_str=None,
    residual_fn=None,
):
    """Create an instance of a single RNN cell.

    Args:
        unit_type: one of "lstm", "gru", "layer_norm_lstm", "nas", "mlstm".
        num_units: hidden size of the cell.
        forget_bias: initial forget-gate bias (LSTM variants only).
        dropout: input dropout probability; forced to 0 outside TRAIN mode.
        mode: tf.contrib.learn.ModeKeys value.
        residual_connection: wrap the cell with a ResidualWrapper when True.
        device_str: optional device string to pin the cell to.
        residual_fn: custom residual combination function.

    Returns:
        The (possibly dropout-/residual-/device-wrapped) RNNCell.

    Raises:
        ValueError: on an unknown unit_type.
    """
    # dropout (= 1 - keep_prob) is set to 0 during eval and infer
    dropout = dropout if mode == tf.contrib.learn.ModeKeys.TRAIN else 0.0
    # Cell Type
    if unit_type == "lstm":
        utils.print_out("  LSTM, forget_bias=%g" % forget_bias, new_line=False)
        single_cell = tf.contrib.rnn.BasicLSTMCell(num_units, forget_bias=forget_bias)
    elif unit_type == "gru":
        utils.print_out("  GRU", new_line=False)
        single_cell = tf.contrib.rnn.GRUCell(num_units)
    elif unit_type == "layer_norm_lstm":
        utils.print_out("  Layer Normalized LSTM, forget_bias=%g" % forget_bias, new_line=False)
        single_cell = tf.contrib.rnn.LayerNormBasicLSTMCell(
            num_units, forget_bias=forget_bias, layer_norm=True
        )
    elif unit_type == "nas":
        utils.print_out("  NASCell", new_line=False)
        single_cell = tf.contrib.rnn.NASCell(num_units)
    elif unit_type == "mlstm":
        # Masked LSTM from tf.contrib.model_pruning, for sparsity training.
        utils.print_out("  Masked_LSTM, forget_bias=%g" % forget_bias, new_line=False)
        single_cell = tf.contrib.model_pruning.MaskedBasicLSTMCell(
            num_units, forget_bias=forget_bias
        )
    else:
        raise ValueError("Unknown unit type %s!" % unit_type)
    # Dropout (= 1 - keep_prob)
    if dropout > 0.0:
        single_cell = tf.contrib.rnn.DropoutWrapper(
            cell=single_cell, input_keep_prob=(1.0 - dropout)
        )
        utils.print_out("  %s, dropout=%g " % (type(single_cell).__name__, dropout), new_line=False)
    # Residual
    if residual_connection:
        single_cell = tf.contrib.rnn.ResidualWrapper(single_cell, residual_fn=residual_fn)
        utils.print_out("  %s" % type(single_cell).__name__, new_line=False)
    # Device Wrapper
    if device_str:
        single_cell = tf.contrib.rnn.DeviceWrapper(single_cell, device_str)
        utils.print_out(
            "  %s, device=%s" % (type(single_cell).__name__, device_str), new_line=False
        )
    return single_cell
def _cell_list(
    unit_type,
    num_units,
    num_layers,
    num_residual_layers,
    forget_bias,
    dropout,
    mode,
    num_gpus,
    base_gpu=0,
    single_cell_fn=None,
    residual_fn=None,
):
    """Build one RNN cell per layer, round-robining the layers across GPUs."""
    cell_builder = single_cell_fn or _single_cell
    cells = []
    for layer in range(num_layers):
        utils.print_out("  cell %d" % layer, new_line=False)
        # Residual connections are applied to the topmost num_residual_layers.
        cell = cell_builder(
            unit_type=unit_type,
            num_units=num_units,
            forget_bias=forget_bias,
            dropout=dropout,
            mode=mode,
            residual_connection=(layer >= num_layers - num_residual_layers),
            device_str=get_device_str(layer + base_gpu, num_gpus),
            residual_fn=residual_fn,
        )
        utils.print_out("")
        cells.append(cell)
    return cells
def create_rnn_cell(
    unit_type,
    num_units,
    num_layers,
    num_residual_layers,
    forget_bias,
    dropout,
    mode,
    num_gpus,
    base_gpu=0,
    single_cell_fn=None,
):
    """Create multi-layer RNN cell.

    Args:
        unit_type: string representing the unit type, i.e. "lstm".
        num_units: the depth of each unit.
        num_layers: number of cells.
        num_residual_layers: Number of residual layers from top to bottom. For
            example, if `num_layers=4` and `num_residual_layers=2`, the last 2
            RNN cells in the returned list will be wrapped with
            `ResidualWrapper`.
        forget_bias: the initial forget bias of the RNNCell(s).
        dropout: floating point value between 0.0 and 1.0: the probability of
            dropout. Ignored if `mode != TRAIN`.
        mode: either tf.contrib.learn.TRAIN/EVAL/INFER.
        num_gpus: The number of gpus used for round-robin placement of layers.
        base_gpu: gpu device id for the first cell; the i-th cell is placed on
            `(base_gpu + i) % num_gpus`.
        single_cell_fn: allows a customized cell builder; defaults to
            model_helper._single_cell.

    Returns:
        An `RNNCell` instance (a MultiRNNCell when num_layers > 1).
    """
    cells = _cell_list(
        unit_type=unit_type,
        num_units=num_units,
        num_layers=num_layers,
        num_residual_layers=num_residual_layers,
        forget_bias=forget_bias,
        dropout=dropout,
        mode=mode,
        num_gpus=num_gpus,
        base_gpu=base_gpu,
        single_cell_fn=single_cell_fn,
    )
    # A single layer needs no MultiRNNCell wrapper.
    return cells[0] if len(cells) == 1 else tf.contrib.rnn.MultiRNNCell(cells)
def gradient_clip(gradients, max_gradient_norm):
    """Clip gradients by global norm and build summaries of both norms."""
    clipped, global_norm = tf.clip_by_global_norm(gradients, max_gradient_norm)
    summaries = [
        tf.summary.scalar("grad_norm", global_norm),
        # Also record the norm after clipping, for comparison.
        tf.summary.scalar("clipped_gradient", tf.global_norm(clipped)),
    ]
    return clipped, summaries, global_norm
def print_variables_in_ckpt(ckpt_path):
    """Print every variable stored in a checkpoint together with its shape."""
    utils.print_out("# Variables in ckpt %s" % ckpt_path)
    shape_map = tf.train.NewCheckpointReader(ckpt_path).get_variable_to_shape_map()
    for var_name in sorted(shape_map):
        utils.print_out("  %s: %s" % (var_name, shape_map[var_name]))
def load_model(model, ckpt_path, session, name):
    """Restore model weights from ckpt_path into session and report timing."""
    t_start = time.time()
    try:
        model.saver.restore(session, ckpt_path)
    except tf.errors.NotFoundError as err:
        # Aid debugging by listing what the checkpoint actually contains.
        utils.print_out("Can't load checkpoint")
        print_variables_in_ckpt(ckpt_path)
        utils.print_out("%s" % str(err))
    # Lookup tables are not part of the checkpoint; (re)initialize them.
    session.run(tf.tables_initializer())
    utils.print_out(
        "  loaded %s model parameters from %s, time %.2fs"
        % (name, ckpt_path, time.time() - t_start)
    )
    return model
def load_quantized_model(model, ckpt_path, session, name):
    """Loads quantized model and dequantizes variables.

    Each trainable variable is expected to have, inside its own variable
    scope, the companions created by add_quatization_variables: a qint8
    "quantized" tensor plus "min_range"/"max_range" scalars. Non-trainable
    variables are restored directly from the checkpoint; trainable ones are
    reconstructed by dequantizing.
    """
    start_time = time.time()
    dequant_ops = []
    for tsr in tf.trainable_variables():
        # Re-enter the variable's scope to fetch its quantized companions.
        with tf.variable_scope(tsr.name.split(":")[0], reuse=True):
            quant_tsr = tf.get_variable("quantized", dtype=tf.qint8)
            min_range = tf.get_variable("min_range")
            max_range = tf.get_variable("max_range")
            dequant_ops.append(tsr.assign(tf.dequantize(quant_tsr, min_range, max_range, "SCALED")))
    # Trainable variables come from dequantization, so restore only the rest.
    restore_list = [tsr for tsr in tf.global_variables() if tsr not in tf.trainable_variables()]
    saver = tf.train.Saver(restore_list)
    try:
        saver.restore(session, ckpt_path)
    except tf.errors.NotFoundError as e:
        utils.print_out("Can't load checkpoint")
        print_variables_in_ckpt(ckpt_path)
        utils.print_out("%s" % str(e))
    session.run(tf.tables_initializer())
    # Populate the trainable variables from their quantized copies.
    session.run(dequant_ops)
    utils.print_out(
        "  loaded %s model parameters from %s, time %.2fs"
        % (name, ckpt_path, time.time() - start_time)
    )
    return model
def add_quatization_variables(model):
    """Add to graph quantization variables.

    NOTE(review): the function name misspells "quantization"; kept unchanged
    because callers reference it by this name.

    For every trainable variable, creates three non-trainable companions in
    the variable's own scope — a qint8 "quantized" copy plus the
    "min_range"/"max_range" scalars of the SCALED quantization — registered in
    _QUANTIZATION_COLLECTION so they can be initialized and saved as a group
    (see quantize_checkpoint / load_quantized_model).
    """
    with model.graph.as_default():
        for tsr in tf.trainable_variables():
            with tf.variable_scope(tsr.name.split(":")[0]):
                # Quantize over the variable's current min/max value range.
                output, min_range, max_range = tf.quantize(
                    tsr, tf.reduce_min(tsr), tf.reduce_max(tsr), tf.qint8, mode="SCALED"
                )
                tf.get_variable(
                    "quantized",
                    initializer=output,
                    trainable=False,
                    collections=[_QUANTIZATION_COLLECTION],
                )
                tf.get_variable(
                    "min_range",
                    initializer=min_range,
                    trainable=False,
                    collections=[_QUANTIZATION_COLLECTION],
                )
                tf.get_variable(
                    "max_range",
                    initializer=max_range,
                    trainable=False,
                    collections=[_QUANTIZATION_COLLECTION],
                )
def quantize_checkpoint(session, ckpt_path):
    """Save a checkpoint containing only the non-trainable (quantized) variables."""
    trainables = tf.trainable_variables()
    non_trainables = [v for v in tf.global_variables() if v not in trainables]
    saver = tf.train.Saver(non_trainables)
    # Materialize the quantized copies before saving them.
    session.run(tf.variables_initializer(tf.get_collection(_QUANTIZATION_COLLECTION)))
    saver.save(session, ckpt_path)
    utils.print_out("Saved quantized checkpoint as %s" % ckpt_path)
def avg_checkpoints(model_dir, num_last_checkpoints, global_step, global_step_name):
    """Average the last N checkpoints in the model_dir.

    Args:
        model_dir: directory containing the checkpoints to average.
        num_last_checkpoints: how many of the newest checkpoints to use.
        global_step: value to store under ``global_step_name`` in the output.
        global_step_name: name of the global-step variable (excluded from
            averaging).

    Returns:
        The directory holding the averaged checkpoint, or None when no
        checkpoint state exists or fewer than ``num_last_checkpoints``
        checkpoints are available.
    """
    checkpoint_state = tf.train.get_checkpoint_state(model_dir)
    if not checkpoint_state:
        utils.print_out("# No checkpoint file found in directory: %s" % model_dir)
        return None
    # Checkpoints are ordered from oldest to newest.
    checkpoints = checkpoint_state.all_model_checkpoint_paths[-num_last_checkpoints:]
    if len(checkpoints) < num_last_checkpoints:
        utils.print_out(
            "# Skipping averaging checkpoints because not enough checkpoints is " "avaliable."
        )
        return None
    avg_model_dir = os.path.join(model_dir, "avg_checkpoints")
    if not tf.gfile.Exists(avg_model_dir):
        utils.print_out(
            "# Creating new directory %s for saving averaged checkpoints." % avg_model_dir
        )
        tf.gfile.MakeDirs(avg_model_dir)
    utils.print_out("# Reading and averaging variables in checkpoints:")
    var_list = tf.contrib.framework.list_variables(checkpoints[0])
    var_values, var_dtypes = {}, {}
    # Accumulators start at zero; the global step is never averaged.
    for (name, shape) in var_list:
        if name != global_step_name:
            var_values[name] = np.zeros(shape)
    for checkpoint in checkpoints:
        utils.print_out("    %s" % checkpoint)
        reader = tf.contrib.framework.load_checkpoint(checkpoint)
        for name in var_values:
            tensor = reader.get_tensor(name)
            var_dtypes[name] = tensor.dtype
            var_values[name] += tensor
    for name in var_values:
        var_values[name] /= len(checkpoints)
    # Build a graph with same variables in the checkpoints, and save the averaged
    # variables into the avg_model_dir.
    with tf.Graph().as_default():
        tf_vars = [
            # Bug fix: look the dtype up per-variable (var_dtypes[v]). The
            # original used var_dtypes[name], where `name` leaked from the
            # loops above, so every rebuilt variable got the dtype of the
            # last variable read.
            tf.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[v])
            for v in var_values
        ]
        placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars]
        assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)]
        tf.Variable(global_step, name=global_step_name, trainable=False)
        # Consistency: use the non-deprecated aliases the rest of this file
        # already uses (tf.global_variables / global_variables_initializer).
        saver = tf.train.Saver(tf.global_variables())
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            # placeholders/assign_ops/var_values all iterate the same dict, so
            # the three sequences stay aligned.
            for p, assign_op, (name, value) in zip(
                placeholders, assign_ops, six.iteritems(var_values)
            ):
                sess.run(assign_op, {p: value})
            # Use the built saver to save the averaged checkpoint. Only keep 1
            # checkpoint and the best checkpoint will be moved to avg_best_metric_dir.
            saver.save(sess, os.path.join(avg_model_dir, "translate.ckpt"))
    return avg_model_dir
def create_or_load_model(model, model_dir, session, name):
    """Restore the latest checkpoint if one exists, otherwise initialize fresh."""
    latest_ckpt = tf.train.latest_checkpoint(model_dir)
    if latest_ckpt:
        model = load_model(model, latest_ckpt, session, name)
    else:
        t_start = time.time()
        # No checkpoint: initialize variables and lookup tables from scratch.
        session.run(tf.global_variables_initializer())
        session.run(tf.tables_initializer())
        utils.print_out(
            "  created %s model with fresh parameters, time %.2fs"
            % (name, time.time() - t_start)
        )
    global_step = model.global_step.eval(session=session)
    return model, global_step
def compute_perplexity(model, sess, name):
    """Run eval over the whole input stream and return its perplexity.

    Args:
        model: model exposing an ``eval(sess)`` step.
        sess: tensorflow session to use.
        name: label for the printed report.

    Returns:
        The perplexity of the eval outputs.
    """
    loss_sum = 0
    predict_count_sum = 0
    t_start = time.time()
    # Drain the eval iterator; OutOfRangeError signals the end of the data.
    while True:
        try:
            step_out = model.eval(sess)
        except tf.errors.OutOfRangeError:
            break
        loss_sum += step_out.eval_loss * step_out.batch_size
        predict_count_sum += step_out.predict_count
    perplexity = utils.safe_exp(loss_sum / predict_count_sum)
    utils.print_time("  eval %s: perplexity %.2f" % (name, perplexity), t_start)
    return perplexity
| 29,803 | 35.346341 | 100 | py |
nlp-architect | nlp-architect-master/examples/intent_extraction/train_mtl_model.py | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import pickle
from os import path
from tensorflow.python.keras.utils import to_categorical
from nlp_architect.nn.tensorflow.python.keras.callbacks import ConllCallback
from nlp_architect.data.intent_datasets import SNIPS
from nlp_architect.models.intent_extraction import MultiTaskIntentModel
from nlp_architect.utils.embedding import get_embedding_matrix, load_word_embeddings
from nlp_architect.utils.generic import one_hot
from nlp_architect.utils.io import (
validate,
validate_existing_directory,
validate_existing_filepath,
validate_parent_exists,
)
from nlp_architect.utils.metrics import get_conll_scores
def validate_input_args():
    """Sanity-check the parsed CLI args and the output file paths.

    Relies on the module-level ``args`` namespace and sets the global
    ``model_path`` (resolved relative to this script's directory) as a side
    effect.
    """
    global model_path
    validate((args.b, int, 1, 100000000))
    validate((args.e, int, 1, 100000000))
    validate((args.sentence_length, int, 1, 10000))
    validate((args.token_emb_size, int, 1, 10000))
    validate((args.intent_hidden_size, int, 1, 10000))
    validate((args.lstm_hidden_size, int, 1, 10000))
    validate((args.tagger_dropout, float, 0, 1))
    model_path = path.join(path.dirname(path.realpath(__file__)), str(args.model_path))
    validate_parent_exists(model_path)
    # NOTE(review): model_info_path is only validated here, not exported; the
    # main script saves to args.model_info_path directly — confirm intended.
    model_info_path = path.join(path.dirname(path.realpath(__file__)), str(args.model_info_path))
    validate_parent_exists(model_info_path)
if __name__ == "__main__":
    # ---- command-line interface ----
    parser = argparse.ArgumentParser()
    parser.add_argument("-b", type=int, default=10, help="Batch size")
    parser.add_argument("-e", type=int, default=10, help="Number of epochs")
    parser.add_argument(
        "--dataset_path", type=validate_existing_directory, required=True, help="dataset directory"
    )
    parser.add_argument("--sentence_length", type=int, default=30, help="Max sentence length")
    parser.add_argument(
        "--token_emb_size", type=int, default=100, help="Token features embedding vector size"
    )
    parser.add_argument(
        "--intent_hidden_size", type=int, default=100, help="Intent detection LSTM hidden size"
    )
    parser.add_argument(
        "--lstm_hidden_size", type=int, default=150, help="Slot tags LSTM hidden size"
    )
    parser.add_argument("--tagger_dropout", type=float, default=0.5, help="Slot tags dropout value")
    parser.add_argument(
        "--embedding_model",
        type=validate_existing_filepath,
        help="Path to word embedding model file",
    )
    parser.add_argument(
        "--use_cudnn", default=False, action="store_true", help="use CUDNN based LSTM cells"
    )
    parser.add_argument("--model_path", type=str, default="model.h5", help="Model file path")
    parser.add_argument(
        "--model_info_path",
        type=str,
        default="model_info.dat",
        help="Path for saving model topology",
    )
    args = parser.parse_args()
    validate_input_args()
    # load dataset
    print("Loading dataset")
    dataset = SNIPS(path=args.dataset_path, sentence_length=args.sentence_length)
    train_x, train_char, train_i, train_y = dataset.train_set
    test_x, test_char, test_i, test_y = dataset.test_set
    # One-hot encode the slot-tag labels (y) and the intent labels (i).
    test_y = to_categorical(test_y, dataset.label_vocab_size)
    train_y = to_categorical(train_y, dataset.label_vocab_size)
    train_i = one_hot(train_i, len(dataset.intents_vocab))
    test_i = one_hot(test_i, len(dataset.intents_vocab))
    # Multi-task model: word + char inputs, intent + slot-tag outputs.
    train_inputs = [train_x, train_char]
    train_outs = [train_i, train_y]
    test_inputs = [test_x, test_char]
    test_outs = [test_i, test_y]
    print("Building model")
    model = MultiTaskIntentModel(use_cudnn=args.use_cudnn)
    model.build(
        dataset.word_len,
        dataset.label_vocab_size,
        dataset.intent_size,
        dataset.word_vocab_size,
        dataset.char_vocab_size,
        word_emb_dims=args.token_emb_size,
        tagger_lstm_dims=args.lstm_hidden_size,
        dropout=args.tagger_dropout,
    )
    # initialize word embedding if external model selected
    if args.embedding_model is not None:
        print("Loading external word embedding")
        embedding_model, _ = load_word_embeddings(args.embedding_model)
        embedding_mat = get_embedding_matrix(
            embedding_model, dataset.word_vocab, dataset.word_vocab_size
        )
        model.load_embedding_weights(embedding_mat)
    # CoNLL F1 evaluation on the slot-tagging task after each epoch.
    conll_cb = ConllCallback(test_inputs, test_y, dataset.tags_vocab.vocab, batch_size=args.b)
    # train model
    model.fit(
        x=train_inputs,
        y=train_outs,
        batch_size=args.b,
        epochs=args.e,
        validation=(test_inputs, test_outs),
        callbacks=[conll_cb],
    )
    print("Training done")
    print("Saving model")
    model.save(args.model_path)
    # Persist the vocabularies needed to run inference with the saved weights.
    with open(args.model_info_path, "wb") as fp:
        info = {
            "type": "mtl",
            "tags_vocab": dataset.tags_vocab.vocab,
            "word_vocab": dataset.word_vocab.vocab,
            "char_vocab": dataset.char_vocab.vocab,
            "intent_vocab": dataset.intents_vocab.vocab,
        }
        pickle.dump(info, fp)
    # test performance
    predictions = model.predict(test_inputs, batch_size=args.b)
    # NOTE(review): `eval` shadows the builtin; harmless here but worth renaming.
    eval = get_conll_scores(
        predictions, test_y, {v: k for k, v in dataset.tags_vocab.vocab.items()}
    )
    print(eval)
| 6,041 | 37 | 100 | py |
nlp-architect | nlp-architect-master/examples/intent_extraction/train_seq2seq_model.py | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import pickle
from os import path
from tensorflow.python.keras.utils import to_categorical
from nlp_architect.nn.tensorflow import ConllCallback
from nlp_architect.data.intent_datasets import SNIPS
from nlp_architect.models.intent_extraction import Seq2SeqIntentModel
from nlp_architect.utils.io import (
validate,
validate_existing_directory,
validate_existing_filepath,
validate_parent_exists,
)
from nlp_architect.utils.metrics import get_conll_scores
def validate_input_args():
    """Sanity-check the parsed CLI args and the output file paths.

    Relies on the module-level ``args`` namespace and sets the global
    ``model_path`` (resolved relative to this script's directory) as a side
    effect.
    """
    global model_path
    validate((args.b, int, 1, 100000000))
    validate((args.e, int, 1, 100000000))
    validate((args.sentence_length, int, 1, 10000))
    validate((args.token_emb_size, int, 1, 10000))
    validate((args.lstm_hidden_size, int, 1, 10000))
    validate((args.encoder_depth, int, 1, 10))
    validate((args.decoder_depth, int, 1, 10))
    validate((args.encoder_dropout, float, 0, 1))
    validate((args.decoder_dropout, float, 0, 1))
    model_path = path.join(path.dirname(path.realpath(__file__)), str(args.model_path))
    validate_parent_exists(model_path)
    # NOTE(review): model_info_path is only validated here, not exported; the
    # main script saves to args.model_info_path directly — confirm intended.
    model_info_path = path.join(path.dirname(path.realpath(__file__)), str(args.model_info_path))
    validate_parent_exists(model_info_path)
if __name__ == "__main__":
    # ---- command-line interface ----
    parser = argparse.ArgumentParser()
    parser.add_argument("-b", type=int, default=10, help="Batch size")
    parser.add_argument("-e", type=int, default=10, help="Number of epochs")
    parser.add_argument(
        "--dataset_path", type=validate_existing_directory, required=True, help="dataset directory"
    )
    parser.add_argument("--sentence_length", type=int, default=30, help="Max sentence length")
    parser.add_argument(
        "--token_emb_size", type=int, default=100, help="Token features embedding vector size"
    )
    parser.add_argument(
        "--lstm_hidden_size", type=int, default=150, help="Encoder LSTM hidden size"
    )
    parser.add_argument("--encoder_depth", type=int, default=1, help="Encoder LSTM depth")
    parser.add_argument("--decoder_depth", type=int, default=1, help="Decoder LSTM depth")
    parser.add_argument("--encoder_dropout", type=float, default=0.5, help="Encoder dropout value")
    parser.add_argument("--decoder_dropout", type=float, default=0.5, help="Decoder dropout value")
    # NOTE(review): --embedding_model is parsed but never used below — confirm
    # whether embedding initialization was intentionally omitted for seq2seq.
    parser.add_argument(
        "--embedding_model",
        type=validate_existing_filepath,
        help="Path to word embedding model file",
    )
    parser.add_argument("--model_path", type=str, default="model.h5", help="Model file path")
    parser.add_argument(
        "--model_info_path",
        type=str,
        default="model_info.dat",
        help="Path for saving model topology",
    )
    args = parser.parse_args()
    validate_input_args()
    # Load the dataset; char features are unused by the seq2seq tagger.
    dataset = SNIPS(path=args.dataset_path, sentence_length=args.sentence_length)
    train_x, _, train_i, train_y = dataset.train_set
    test_x, _, test_i, test_y = dataset.test_set
    # One-hot encode the slot-tag labels.
    test_y = to_categorical(test_y, dataset.label_vocab_size)
    train_y = to_categorical(train_y, dataset.label_vocab_size)
    model = Seq2SeqIntentModel()
    model.build(
        dataset.word_vocab_size,
        dataset.label_vocab_size,
        args.token_emb_size,
        args.encoder_depth,
        args.decoder_depth,
        args.lstm_hidden_size,
        args.encoder_dropout,
        args.decoder_dropout,
    )
    # CoNLL F1 evaluation on the slot-tagging task after each epoch.
    conll_cb = ConllCallback(test_x, test_y, dataset.tags_vocab.vocab, batch_size=args.b)
    # train model
    model.fit(
        x=train_x,
        y=train_y,
        batch_size=args.b,
        epochs=args.e,
        validation=(test_x, test_y),
        callbacks=[conll_cb],
    )
    print("Training done.")
    print("Saving model")
    model.save(args.model_path)
    # Persist the vocabularies needed to run inference with the saved weights.
    with open(args.model_info_path, "wb") as fp:
        info = {
            "type": "seq2seq",
            "tags_vocab": dataset.tags_vocab.vocab,
            "word_vocab": dataset.word_vocab.vocab,
            "char_vocab": dataset.char_vocab.vocab,
            "intent_vocab": dataset.intents_vocab.vocab,
        }
        pickle.dump(info, fp)
    # test performance
    predictions = model.predict(test_x, batch_size=args.b)
    # NOTE(review): `eval` shadows the builtin; harmless here but worth renaming.
    eval = get_conll_scores(
        predictions, test_y, {v: k for k, v in dataset.tags_vocab.vocab.items()}
    )
    print(eval)
| 5,153 | 36.620438 | 99 | py |
nlp-architect | nlp-architect-master/examples/supervised_sentiment/example_ensemble.py | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
"""
This example uses the Amazon reviews though additional datasets can easily be substituted.
It only requires text and a sentiment label
It then takes the dataset and trains two models (again can be expanded)
The labels for the test data is then predicted.
The same train and test data is used for both models
The ensembler takes the two prediction matrixes and weights (as defined by model accuracy)
and determines the final prediction matrix.
Finally, the full classification report is displayed.
A similar pipeline could be utilized to train models on a dataset, predict on a second dataset
and aquire a list of final predictions
"""
import argparse
import numpy as np
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
# pylint: disable=no-name-in-module
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from tensorflow.python.keras.preprocessing.text import Tokenizer
from examples.supervised_sentiment.amazon_reviews import Amazon_Reviews
from examples.supervised_sentiment.ensembler import simple_ensembler
from nlp_architect.utils.generic import to_one_hot
from nlp_architect.utils.io import validate_existing_filepath, check_size
from .supervised_sentiment import simple_lstm, one_hot_cnn
max_fatures = 2000
max_len = 300
batch_size = 32
embed_dim = 256
lstm_out = 140
def ensemble_models(data, args):
    """Train an LSTM and a character one-hot CNN on the same train/test
    split, ensemble their test predictions weighted by each model's final
    training accuracy, and print a classification report for the ensemble.

    Args:
        data: dataset object exposing ``process()``, ``text``, ``labels``
            and ``labels_0`` (e.g. ``Amazon_Reviews``)
        args: parsed CLI arguments; only ``args.epochs`` is used here
    """
    data.process()
    n_classes = len(data.labels[0])
    # A single split shared by both models so their predictions line up
    raw_train, raw_test, y_train, y_test = train_test_split(
        data.text, data.labels, test_size=0.20, random_state=42
    )
    # Word-index sequences, padded to max_len, for the LSTM
    tokenizer = Tokenizer(num_words=max_fatures, split=" ")
    tokenizer.fit_on_texts(raw_train)
    x_train_seq = pad_sequences(tokenizer.texts_to_sequences(raw_train), maxlen=max_len)
    x_test_seq = pad_sequences(tokenizer.texts_to_sequences(raw_test), maxlen=max_len)
    # Train the bi-directional LSTM
    lstm_model = simple_lstm(max_fatures, n_classes, x_train_seq.shape[1], embed_dim, lstm_out)
    lstm_history = lstm_model.fit(
        x_train_seq,
        y_train,
        epochs=args.epochs,
        batch_size=batch_size,
        verbose=1,
        validation_data=(x_test_seq, y_test),
    )
    lstm_acc = lstm_history.history["acc"][-1]
    print("LSTM model accuracy ", lstm_acc)
    lstm_predictions = lstm_model.predict(x_test_seq)
    # Character-level one-hot encoding for the temporal CNN
    x_train_onehot = np.asarray([to_one_hot(text) for text in raw_train])
    x_test_onehot = np.asarray([to_one_hot(text) for text in raw_test])
    # Train the one-hot CNN classifier
    cnn_model = one_hot_cnn(n_classes, max_len)
    cnn_history = cnn_model.fit(
        x_train_onehot,
        y_train,
        batch_size=batch_size,
        epochs=args.epochs,
        verbose=1,
        validation_data=(x_test_onehot, y_test),
    )
    cnn_acc = cnn_history.history["acc"][-1]
    print("CNN model accuracy: ", cnn_acc)
    one_hot_cnn_predictions = cnn_model.predict(x_test_onehot)
    # Weight each model's vote by its normalized training accuracy
    accuracies = [lstm_acc, cnn_acc]
    norm_accuracies = [a / sum(accuracies) for a in accuracies]
    print("Ensembling with weights: ")
    for na in norm_accuracies:
        print(na)
    ensembled_predictions = simple_ensembler(
        [lstm_predictions, one_hot_cnn_predictions], norm_accuracies
    )
    final_preds = np.argmax(ensembled_predictions, axis=1)
    # Report ensemble quality on the held-out split
    print(
        classification_report(
            np.argmax(y_test, axis=1), final_preds, target_names=data.labels_0.columns.values
        )
    )
if __name__ == "__main__":
    # Command-line entry point: parse arguments, validate the input path and
    # run the two-model ensemble pipeline.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--file_path", type=str, default="./", help="file_path where the files to parse are located"
    )
    parser.add_argument(
        "--data_type", type=str, default="amazon", choices=["amazon"], help="dataset source"
    )
    # check_size bounds the accepted epoch count to [1, 20000]
    parser.add_argument(
        "--epochs",
        type=int,
        default=10,
        help="Number of epochs for both models",
        action=check_size(1, 20000),
    )
    args_in = parser.parse_args()
    # Check file path
    if args_in.file_path:
        validate_existing_filepath(args_in.file_path)
    if args_in.data_type == "amazon":
        data_in = Amazon_Reviews(args_in.file_path)
    # NOTE(review): data_in is only bound in the "amazon" branch; argparse's
    # choices=["amazon"] guarantees that today, but adding a new data_type
    # without binding data_in would raise NameError here.
    ensemble_models(data_in, args_in)
| 5,238 | 33.24183 | 100 | py |
nlp-architect | nlp-architect-master/examples/supervised_sentiment/supervised_sentiment.py | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import tensorflow as tf
def simple_lstm(max_features, dense_out, input_length, embed_dim=256, lstm_out=140, dropout=0.5):
    """
    Simple Bi-direction LSTM Model in Keras
    Single layer bi-directional lstm with recurrent dropout and a fully connected layer
    Args:
        max_features (int): vocabulary size
        dense_out (int): size out the output dense layer, this is the number of classes
        input_length (int): length of the input text
        embed_dim (int): internal embedding size used in the lstm
        lstm_out (int): size of the bi-directional output layer
        dropout (float): value for recurrent dropout, between 0 and 1
    Returns:
        model (model): LSTM model
    """
    # Embedding -> BiLSTM -> softmax classifier, expressed as a layer list
    layer_stack = [
        tf.keras.layers.Embedding(max_features, embed_dim, input_length=input_length),
        tf.keras.layers.Bidirectional(
            tf.keras.layers.LSTM(lstm_out, recurrent_dropout=dropout, activation="tanh")
        ),
        tf.keras.layers.Dense(dense_out, activation="softmax"),
    ]
    model = tf.keras.models.Sequential(layer_stack)
    model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
    return model
def one_hot_cnn(dense_out, max_len=300, frame="small"):
    """
    Temporal CNN Model
    As defined in "Text Understanding from Scratch" by Zhang, LeCun 2015
    https://arxiv.org/pdf/1502.01710v4.pdf
    This model is a series of 1D CNNs, with a maxpooling and fully connected layers.
    The frame sizes may either be large or small.
    Args:
        dense_out (int): size out the output dense layer, this is the number of classes
        max_len (int): length of the input text
        frame (str): frame size, either large or small
    Returns:
        model (model): temporal CNN model
    """
    # Frame size selects the channel width of the conv stack and the FC widths
    if frame == "large":
        cnn_size = 1024
        fully_connected = [2048, 2048, dense_out]
    else:
        cnn_size = 256
        fully_connected = [1024, 1024, dense_out]
    model = tf.keras.models.Sequential()
    # Six temporal convolutions, with max-pooling after layers 1, 2 and 6.
    # (Leftover debug print(model.output_shape) statements were removed.)
    model.add(tf.keras.layers.Conv1D(cnn_size, 7, padding="same", input_shape=(68, max_len)))
    model.add(tf.keras.layers.MaxPooling1D(pool_size=3))
    model.add(tf.keras.layers.Conv1D(cnn_size, 7, padding="same"))
    model.add(tf.keras.layers.MaxPooling1D(pool_size=3))
    model.add(tf.keras.layers.Conv1D(cnn_size, 3, padding="same"))
    model.add(tf.keras.layers.Conv1D(cnn_size, 3, padding="same"))
    model.add(tf.keras.layers.Conv1D(cnn_size, 3, padding="same"))
    model.add(tf.keras.layers.Conv1D(cnn_size, 3, padding="same"))
    model.add(tf.keras.layers.MaxPooling1D(pool_size=3))
    model.add(tf.keras.layers.Flatten())
    # Fully connected classifier head with heavy dropout, as in the paper
    # Input is 512, output is 1024/2048
    model.add(tf.keras.layers.Dense(fully_connected[0]))
    model.add(tf.keras.layers.Dropout(0.75))
    model.add(tf.keras.layers.Activation("relu"))
    # Input is 1024/2048, output is 1024/2048
    model.add(tf.keras.layers.Dense(fully_connected[1]))
    model.add(tf.keras.layers.Dropout(0.75))
    model.add(tf.keras.layers.Activation("relu"))
    # Input is 1024/2048, output is dense_out (number of classes)
    model.add(tf.keras.layers.Dense(fully_connected[2]))
    model.add(tf.keras.layers.Activation("softmax"))
    # Stochastic gradient parameters as set by paper
    sgd = tf.keras.optimizers.SGD(lr=0.01, decay=1e-5, momentum=0.9, nesterov=True)
    model.compile(loss="categorical_crossentropy", optimizer=sgd, metrics=["accuracy"])
    return model
| 4,413 | 35.180328 | 97 | py |
nlp-architect | nlp-architect-master/examples/supervised_sentiment/optimize_example.py | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import argparse
import pickle
from hyperopt import fmin, tpe, hp, Trials
from sklearn.model_selection import train_test_split
# pylint: disable=no-name-in-module
from tensorflow.python.keras.callbacks import EarlyStopping
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from tensorflow.python.keras.preprocessing.text import Tokenizer
from examples.supervised_sentiment.amazon_reviews import Amazon_Reviews
from nlp_architect.utils.io import validate_parent_exists, check_size, validate_existing_filepath
from .supervised_sentiment import simple_lstm
max_len = 100
batch_size = 32
def run_loss(args):
    """Hyperopt objective: train an LSTM with the sampled hyper-parameters
    and return ``1 - validation accuracy`` (hyperopt minimizes the result).

    Args:
        args: dict produced by hyperopt from the search space; carries the
            dataset under ``"data"`` plus sampled hyper-parameter values
    Returns:
        float: one minus the final validation accuracy
    """
    data = args["data"]
    # Re-balance the dataset for every trial so each run sees a fresh sample
    data.process()
    n_classes = len(data.labels[0])
    # Shared train/test split
    train_text, test_text, y_train, y_test = train_test_split(
        data.text, data.labels, test_size=0.20, random_state=42
    )
    print(args)
    # Tokenizer is fit on all text (unbalanced and train/test); replacing this
    # with a pretrained embedding trained on a larger corpus would be nicer.
    vocab_size = int(args["max_features"])
    tokenizer = Tokenizer(num_words=vocab_size, split=" ")
    tokenizer.fit_on_texts(data.all_text)
    x_train = pad_sequences(tokenizer.texts_to_sequences(train_text), maxlen=max_len)
    x_test = pad_sequences(tokenizer.texts_to_sequences(test_text), maxlen=max_len)
    # Build the LSTM with the sampled hyper-parameters
    lstm_model = simple_lstm(
        vocab_size,
        n_classes,
        x_train.shape[1],
        int(args["embed_dim"]),
        int(args["lstm_out"]),
        args["dropout"],
    )
    # hp.randint can sample 0 epochs; bump to at least one
    if args["epochs"] == 0:
        args["epochs"] = 1
    es = EarlyStopping(monitor="val_acc", min_delta=0, patience=6, verbose=0, mode="max")
    history = lstm_model.fit(
        x_train,
        y_train,
        epochs=args["epochs"],
        batch_size=batch_size,
        verbose=1,
        validation_data=(x_test, y_test),
        callbacks=[es],
    )
    lstm_acc = history.history["val_acc"][-1]
    print("LSTM model accuracy ", lstm_acc)
    # Hyperopt minimizes, so invert the accuracy
    return 1 - lstm_acc
if __name__ == "__main__":
    # Command-line entry point: run (or resume) a hyperopt TPE search over the
    # LSTM hyper-parameters and pickle the accumulated trial history.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--file_path",
        type=validate_existing_filepath,
        default="./",
        help="file_path where the files to parse are located",
    )
    parser.add_argument("--data_type", type=str, default="amazon", choices=["amazon"])
    parser.add_argument(
        "--output_file",
        type=validate_parent_exists,
        default="./opt_trials.pkl",
        help="file_path where the output of the trials will be located",
    )
    parser.add_argument("--new_trials", type=int, default=20, action=check_size(1, 20000))
    args_in = parser.parse_args()
    # Check inputs
    if args_in.file_path:
        validate_existing_filepath(args_in.file_path)
    if args_in.output_file:
        validate_parent_exists(args_in.output_file)
    if args_in.data_type == "amazon":
        data_in = Amazon_Reviews(args_in.file_path)
    # Resume from a previous run when a .pkl trials file already exists
    try:
        if args_in.output_file.endswith(".pkl"):
            with open(args_in.output_file, "rb") as read_f:
                trials_to_keep = pickle.load(read_f)
            print("Utilizing existing trial files")
        else:
            trials_to_keep = Trials()
        # If the file does not already exist we will start with a new set of trials
    except FileNotFoundError:
        trials_to_keep = Trials()
    # Search space; the dataset rides along under "data" so run_loss can reach it
    space = {
        "data": data_in,
        "max_features": hp.choice("max_features", [500, 1000, 2000, 3000]),
        "embed_dim": hp.uniform("embed_dim", 100, 500),
        "lstm_out": hp.uniform("lstm_out", 50, 300),
        "epochs": hp.randint("epochs", 50),
        "dropout": hp.uniform("dropout", 0, 0.1),
    }
    # fmin's max_evals is cumulative, so add the requested count to what's done
    num_evals = len(trials_to_keep.trials) + args_in.new_trials
    best = fmin(run_loss, space=space, algo=tpe.suggest, max_evals=num_evals, trials=trials_to_keep)
    # Write out the trials
    with open(args_in.output_file, "wb") as f:
        pickle.dump(trials_to_keep, f)
| 4,985 | 34.112676 | 100 | py |
nlp-architect | nlp-architect-master/examples/chunker/train.py | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import division, print_function, unicode_literals, absolute_import
import argparse
import pickle
from os import path
from tensorflow import keras
from nlp_architect.nn.tensorflow.python.keras.callbacks import ConllCallback
from nlp_architect.data.sequential_tagging import CONLL2000
from nlp_architect.models.chunker import SequenceChunker
from nlp_architect.utils.embedding import load_word_embeddings, get_embedding_matrix
from nlp_architect.utils.io import (
validate_existing_filepath,
validate_parent_exists,
validate,
validate_existing_directory,
)
from nlp_architect.utils.metrics import get_conll_scores
def create_argument_parser():
    """Build and return the CLI argument parser for chunker training."""
    p = argparse.ArgumentParser()
    # Data and embeddings
    p.add_argument("--data_dir", type=validate_existing_directory,
                   help="Path to directory containing CONLL2000 files")
    p.add_argument("--embedding_model", type=validate_existing_filepath,
                   help="Word embedding model path (GloVe/Fasttext/textual)")
    # Input shaping
    p.add_argument("--sentence_length", type=int, default=50,
                   help="Maximum sentence length")
    p.add_argument("--char_features", default=False, action="store_true",
                   help="use word character features in addition to words")
    p.add_argument("--max_word_length", type=int, default=12,
                   help="maximum number of character in one word (if --char_features is enabled)")
    # Model hyper-parameters
    p.add_argument("--feature_size", type=int, default=100,
                   help="Feature vector size (in embedding and LSTM layers)")
    p.add_argument("--use_cudnn", default=False, action="store_true",
                   help="use CUDNN based LSTM cells")
    p.add_argument("--classifier", type=str, default="crf", choices=["crf", "softmax"],
                   help="classifier to use in last layer")
    # Training loop
    p.add_argument("-b", type=int, default=10, help="batch size")
    p.add_argument("-e", type=int, default=10, help="number of epochs run fit model")
    p.add_argument("--model_name", type=str, default="chunker_model",
                   help="Model name (used for saving the model)")
    return p
def _save_model():
    """Pickle the dataset vocabularies and save the trained model weights.

    Relies on module-level names bound in ``__main__``: ``dataset``, ``args``,
    ``settings_path``, ``model`` and ``model_path``.
    """
    vocab_names = ["word_vocab", "pos_vocab", "chunk_vocab"]
    if args.char_features is True:
        vocab_names.append("char_vocab")
    model_params = {name: getattr(dataset, name) for name in vocab_names}
    with open(settings_path, "wb") as fp:
        pickle.dump(model_params, fp)
    model.save(model_path)
if __name__ == "__main__":
    # Command-line entry point: train, save, reload and evaluate a chunker.
    # read input args and validate
    parser = create_argument_parser()
    args = parser.parse_args()
    validate((args.sentence_length, int, 1, 1000))
    validate((args.feature_size, int, 1, 10000))
    validate((args.b, int, 1, 100000))
    validate((args.e, int, 1, 100000))
    # Model artifacts are written next to this script
    model_path = path.join(
        path.dirname(path.realpath(__file__)), "{}.h5".format(str(args.model_name))
    )
    settings_path = path.join(
        path.dirname(path.realpath(__file__)), "{}.params".format(str(args.model_name))
    )
    validate_parent_exists(model_path)
    # load dataset and get tokens/chunks/pos tags
    dataset = CONLL2000(
        data_path=args.data_dir,
        sentence_length=args.sentence_length,
        extract_chars=args.char_features,
        max_word_length=args.max_word_length,
    )
    train_set = dataset.train_set
    test_set = dataset.test_set
    words_train, pos_train, chunk_train = train_set[:3]
    words_test, pos_test, chunk_test = test_set[:3]
    # get label sizes, transform y's into 1-hot encoding
    # (+1 / +2 presumably reserve padding/OOV indices — confirm in CONLL2000)
    chunk_labels = len(dataset.chunk_vocab) + 1
    pos_labels = len(dataset.pos_vocab) + 1
    word_vocab_size = len(dataset.word_vocab) + 2
    char_vocab_size = None
    if args.char_features is True:
        char_train = train_set[3]
        char_test = test_set[3]
        char_vocab_size = len(dataset.char_vocab) + 2
    pos_train = keras.utils.to_categorical(pos_train, num_classes=pos_labels)
    chunk_train = keras.utils.to_categorical(chunk_train, num_classes=chunk_labels)
    pos_test = keras.utils.to_categorical(pos_test, num_classes=pos_labels)
    chunk_test = keras.utils.to_categorical(chunk_test, num_classes=chunk_labels)
    # build model with input parameters
    model = SequenceChunker(use_cudnn=args.use_cudnn)
    model.build(
        word_vocab_size,
        pos_labels,
        chunk_labels,
        char_vocab_size=char_vocab_size,
        max_word_len=args.max_word_length,
        feature_size=args.feature_size,
        classifier=args.classifier,
    )
    # initialize word embedding if external model selected
    if args.embedding_model is not None:
        embedding_model, _ = load_word_embeddings(args.embedding_model)
        embedding_mat = get_embedding_matrix(embedding_model, dataset.word_vocab)
        model.load_embedding_weights(embedding_mat)
    # train the model
    if args.char_features is True:
        train_features = [words_train, char_train]
        test_features = [words_test, char_test]
    else:
        train_features = words_train
        test_features = words_test
    train_labels = [pos_train, chunk_train]
    test_labels = [pos_test, chunk_test]
    # Reports chunk F1 on the test set after every epoch
    chunk_f1_cb = ConllCallback(test_features, chunk_test, dataset.chunk_vocab.vocab, batch_size=64)
    model.fit(
        train_features,
        train_labels,
        epochs=args.e,
        batch_size=args.b,
        validation_data=(test_features, test_labels),
        callbacks=[chunk_f1_cb],
    )
    # save model
    _save_model()
    # load model (round-trip to verify the saved artifact before evaluating)
    model = SequenceChunker(use_cudnn=args.use_cudnn)
    model.load(model_path)
    # print evaluation metric
    chunk_pred = model.predict(test_features, 64)
    res = get_conll_scores(chunk_pred, chunk_test, dataset.chunk_vocab.reverse_vocab())
    print(res)
| 6,774 | 33.92268 | 100 | py |
nlp-architect | nlp-architect-master/examples/ner/train.py | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import argparse
import pickle
from os import path
import numpy as np
from tensorflow import keras
from nlp_architect.nn.tensorflow.python.keras.callbacks import ConllCallback
from nlp_architect.data.sequential_tagging import SequentialTaggingDataset
from nlp_architect.models.ner_crf import NERCRF
from nlp_architect.utils.embedding import get_embedding_matrix, load_word_embeddings
from nlp_architect.utils.io import validate, validate_existing_filepath, validate_parent_exists
from nlp_architect.utils.metrics import get_conll_scores
def read_input_args():
    """Parse, validate and return the NER training CLI arguments."""
    p = argparse.ArgumentParser()
    # Training loop
    p.add_argument("-b", type=int, default=10, help="Batch size")
    p.add_argument("-e", type=int, default=10, help="Number of epochs")
    # Dataset files
    p.add_argument("--train_file", type=validate_existing_filepath, required=True,
                   help="Train file (sequential tagging dataset format)")
    p.add_argument("--test_file", type=validate_existing_filepath, required=True,
                   help="Test file (sequential tagging dataset format)")
    p.add_argument("--tag_num", type=int, default=2,
                   help="Entity labels tab number in train/test files")
    # Input shaping
    p.add_argument("--sentence_length", type=int, default=50, help="Max sentence length")
    p.add_argument("--word_length", type=int, default=12,
                   help="Max word length in characters")
    # Model hyper-parameters
    p.add_argument("--word_embedding_dims", type=int, default=100,
                   help="Word features embedding dimension size")
    p.add_argument("--character_embedding_dims", type=int, default=25,
                   help="Character features embedding dimension size")
    p.add_argument("--char_features_lstm_dims", type=int, default=25,
                   help="Character feature extractor LSTM dimension size")
    p.add_argument("--entity_tagger_lstm_dims", type=int, default=100,
                   help="Entity tagger LSTM dimension size")
    p.add_argument("--dropout", type=float, default=0.2, help="Dropout rate")
    p.add_argument("--embedding_model", type=validate_existing_filepath,
                   help="Path to external word embedding model file")
    # Output artifacts
    p.add_argument("--model_path", type=str, default="model.h5",
                   help="Path for saving model weights")
    p.add_argument("--model_info_path", type=str, default="model_info.dat",
                   help="Path for saving model topology")
    p.add_argument("--use_cudnn", default=False, action="store_true",
                   help="use CUDNN based LSTM cells")
    parsed = p.parse_args()
    validate_input_args(parsed)
    return parsed
def validate_input_args(input_args):
    """Range-check the numeric CLI arguments and verify that the parent
    directories of the output paths exist."""
    # (value, lower bound, upper bound) for every integer argument
    int_bounds = [
        (input_args.b, 1, 100000),
        (input_args.e, 1, 100000),
        (input_args.tag_num, 1, 1000),
        (input_args.sentence_length, 1, 10000),
        (input_args.word_length, 1, 100),
        (input_args.word_embedding_dims, 1, 10000),
        (input_args.character_embedding_dims, 1, 1000),
        (input_args.char_features_lstm_dims, 1, 10000),
        (input_args.entity_tagger_lstm_dims, 1, 10000),
    ]
    for value, low, high in int_bounds:
        validate((value, int, low, high))
    validate((input_args.dropout, float, 0, 1))
    # Output paths are resolved relative to this script's directory
    here = path.dirname(path.realpath(__file__))
    validate_parent_exists(path.join(here, str(input_args.model_path)))
    validate_parent_exists(path.join(here, str(input_args.model_info_path)))
if __name__ == "__main__":
    # Command-line entry point: train a BiLSTM-CRF NER model, save it and
    # print CONLL evaluation scores on the test set.
    # parse the input
    args = read_input_args()
    # load dataset and parameters
    dataset = SequentialTaggingDataset(
        args.train_file,
        args.test_file,
        max_sentence_length=args.sentence_length,
        max_word_length=args.word_length,
        tag_field_no=args.tag_num,
    )
    # get the train and test data sets
    x_train, x_char_train, y_train = dataset.train_set
    x_test, x_char_test, y_test = dataset.test_set
    # +1 presumably reserves a padding index at 0 — confirm in the dataset class
    num_y_labels = len(dataset.y_labels) + 1
    vocabulary_size = dataset.word_vocab_size
    char_vocabulary_size = dataset.char_vocab_size
    y_test = keras.utils.to_categorical(y_test, num_y_labels)
    y_train = keras.utils.to_categorical(y_train, num_y_labels)
    ner_model = NERCRF(use_cudnn=args.use_cudnn)
    # pylint: disable=unexpected-keyword-arg
    ner_model.build(
        args.word_length,
        num_y_labels,
        vocabulary_size,
        char_vocabulary_size,
        word_embedding_dims=args.word_embedding_dims,
        char_embedding_dims=args.character_embedding_dims,
        tagger_lstm_dims=args.entity_tagger_lstm_dims,
        dropout=args.dropout,
    )
    # initialize word embedding if external model selected
    if args.embedding_model is not None:
        embedding_model, _ = load_word_embeddings(args.embedding_model)
        embedding_mat = get_embedding_matrix(embedding_model, dataset.word_vocab)
        ner_model.load_embedding_weights(embedding_mat)
    train_inputs = [x_train, x_char_train]
    test_inputs = [x_test, x_char_test]
    # append per-sentence lengths (count of non-pad tokens) as a third input
    train_inputs.append(np.sum(np.not_equal(x_train, 0), axis=-1).reshape((-1, 1)))
    test_inputs.append(np.sum(np.not_equal(x_test, 0), axis=-1).reshape((-1, 1)))
    conll_cb = ConllCallback(test_inputs, y_test, dataset.y_labels.vocab, batch_size=args.b)
    # NOTE(review): `validation=` (not `validation_data=`) matches NERCRF's
    # own fit wrapper — confirm against the NERCRF signature.
    ner_model.fit(
        x=train_inputs,
        y=y_train,
        batch_size=args.b,
        epochs=args.e,
        callbacks=[conll_cb],
        validation=(test_inputs, y_test),
    )
    # saving model
    ner_model.save(args.model_path)
    with open(args.model_info_path, "wb") as fp:
        info = {
            "y_vocab": dataset.y_labels.vocab,
            "word_vocab": dataset.word_vocab.vocab,
            "char_vocab": dataset.char_vocab.vocab,
        }
        pickle.dump(info, fp)
    # running predictions
    predictions = ner_model.predict(x=test_inputs, batch_size=args.b)
    # renamed from `eval` so the builtin eval() is not shadowed
    eval_scores = get_conll_scores(
        predictions, y_test, {v: k for k, v in dataset.y_labels.vocab.items()}
    )
    print(eval_scores)
| 6,991 | 35.994709 | 100 | py |
nlp-architect | nlp-architect-master/solutions/InterpreT/application/tasks.py | # ******************************************************************************
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
""" Module for defining a "Task" (e.g. ABSA, WSC) for InterpreT.
The tasks defined in this module will be used to configure the
InterpreT app.
"""
from typing import List, Tuple, Dict, Union
from abc import ABC, abstractmethod
import torch
import pandas as pd
import numpy as np
class Task(ABC):
    """Abstract class for defining tasks.
    To create your own task, please refer to the provided ABSATask and WSCTask as examples for how to do so.
    There are 4 abstract methods that must be defined: _init_tsne_layout(), _init_table_cols_layout(),
    get_val_to_label_map(), get_tsne_rows(). These definitions will all depend on how the input dataframe
    is structured.
    Attributes:
        mode: 'normal' if a single model is loaded, else 'compare'
        num_layers: number of transformer layers in the model
        model_ids: ["model1"] or ["model1", "model2"], used internally
        model_names: a list of the given model names to be displayed in app (correspond to above model_ids)
        collateral_paths: list of filepaths to the model collaterals (attn, hidden_states)
        df_paths: list of filepaths to model dataframe (everything else)
        tsne_internal_options: list of column names from the dataframe to color the tsne plot with
        tsne_displayed_options: list of names to display corresponding to the above plot options
        tsne_plot_description: string title for the tsne plot
        attn_map_options: list of options to display for attention map plot
        head_matrix_options: list of options to display for head matrix plot
        table_cols: list of columns to display for the summary table
        map_model_id_to_name: dictionary mapping "model1"/"model2" (used internally) to model names
        map_model_to_full_df: dict mapping model_ids to dataframes (after _preprocess_df)
        map_model_to_df: dict mapping model_ids to dataframes (after _preprocess_df and _process_all_df)
        map_model_to_collateral: dict mapping model_ids to collateral paths
    """
    def __init__(
        self,
        mode: str,
        num_layers: int,
        model_names: List[str],
        collaterals_paths: List[str],
        df_paths: List[str],
    ):
        # General attributes and filepaths
        self.mode = mode
        self.num_layers = num_layers
        self.model_ids = [f"model{i + 1}" for i in range(len(collaterals_paths))]
        self.model_names = model_names if model_names is not None else self.model_ids
        self.collaterals_paths = collaterals_paths
        self.df_paths = df_paths
        # Mappings to and from user-provided model names (e.g. "BERT", "LiBERT") to internally used model name ("model1", "model2")
        self.map_model_id_to_name, self.map_model_name_to_id = self._init_model_maps()
        # Layout options
        (
            self.tsne_internal_options,
            self.tsne_displayed_options,
            self.tsne_plot_description,
        ) = self._init_tsne_layout()
        self.attn_map_options = self._init_attn_layout()
        self.head_matrix_options = self._init_head_matrix_layout()
        self.table_cols = self._init_table_cols_layout()
        # Mappings to from model names to different data sources (dataframe, collateral)
        self.map_model_to_full_df, self.map_model_to_df = self._init_df_maps()
        self.map_model_to_collateral = self._init_collateral_map()
    @abstractmethod
    def _init_tsne_layout(self) -> Tuple[List[str], List[str], str]:
        """Get layout options for the tsne plot.

        Returns (internal column-name options, displayed option labels,
        plot description) — see the unpacking in __init__.
        """
        pass
    def _init_attn_layout(self) -> List[str]:
        """Get layout options for the attention visualization."""
        attn_map_options = []
        for model_name in self.model_names:
            attn_map_options += [
                model_name,
            ]
        # A "delta" option (model1 - model2) only makes sense with two models
        if self.mode == "compare":
            attn_map_options += ["delta"]
        return attn_map_options
    def _init_head_matrix_layout(self) -> List[str]:
        """Get layout options for the head matrix summary plot."""
        head_matrix_options = []
        for model_name in self.model_names:
            head_matrix_options += [
                f"{model_name}_std",
            ]
        if self.mode == "compare":
            head_matrix_options += ["delta_std"]
        return self._add_head_matrix_options(head_matrix_options)
    def _add_head_matrix_options(self, head_matrix_options: List[str]) -> List[str]:
        """Add additional task-specific head matrix summary option labels (e.g. custom metrics)."""
        return head_matrix_options
    @abstractmethod
    def _init_table_cols_layout(self) -> List[str]:
        """Get column labels for the summary table."""
        pass
    def _init_model_maps(self) -> Tuple[Dict[str, str], Dict[str, str]]:
        """Creates mapping between internal model ids ('model1'/'model2') and given model names.

        Returns (id -> name, name -> id) dicts.
        """
        map_model_id_to_name = {
            model_id: model_name for model_id, model_name in zip(self.model_ids, self.model_names)
        }
        map_model_name_to_id = {
            model_name: model_id for model_id, model_name in zip(self.model_ids, self.model_names)
        }
        return map_model_id_to_name, map_model_name_to_id
    def _init_df_maps(self) -> Tuple[Dict[str, pd.DataFrame], Dict[str, pd.DataFrame]]:
        """Reads and processes dataframe for use by application.

        Returns (full dataframes keyed by model id, processed t-SNE
        dataframes keyed by model id).
        """
        full_dfs = {}
        tsne_dfs = {}
        for model_id, df in zip(self.model_ids, self.df_paths):
            full_dfs[model_id] = pd.read_csv(df)
            tsne_dfs[model_id] = self._preprocess_df(
                full_dfs[model_id]
            )  # model-specific processing
        tsne_dfs = self._process_all_df(tsne_dfs)  # model-agnostic processing
        return full_dfs, tsne_dfs
    def _init_collateral_map(self) -> Dict[str, Dict[str, Union[np.array, List[str]]]]:
        """Returns dict mapping model names to collaterals (attentions and hidden states)."""
        collaterals = {}
        for model_id, collateral in zip(self.model_ids, self.collaterals_paths):
            collaterals[model_id] = torch.load(collateral)
        return collaterals
    def get_dropdown_to_df_col_map(self) -> Dict[str, str]:
        """Returns dict mapping displayed dropdown labels to internal dataframe column names."""
        return {
            displayed_option: internal_option
            for displayed_option, internal_option in zip(
                self.tsne_displayed_options, self.tsne_internal_options
            )
        }
    @abstractmethod
    def get_val_to_label_map(self, dropdown_color_option: str) -> Dict[float, str]:
        """Returns dict mapping t-SNE plot color option to a dictionary of (val, label) k/v pairs, for categorical plotting."""
        pass
    def _preprocess_df(self, df: pd.DataFrame) -> pd.DataFrame:
        """Applies model-specific preprocessing to a dataframe (identity by default)."""
        return df
    def _process_all_df(self, dfs: Dict[str, pd.DataFrame]) -> Dict[str, pd.DataFrame]:
        """Processes list of dataframes to be used by application (identity by default)."""
        return dfs
    @abstractmethod
    def get_tsne_rows(
        self, saved_click: Dict[str, pd.DataFrame], model_selector_val: str
    ) -> Tuple[pd.DataFrame, pd.DataFrame]:
        """Get rows from the tsne dataframe based on click."""
        pass
    def get_attn_map(self, attn_map_option: str, example_id: int) -> Tuple[np.array, List[str]]:
        """Get attention matrix for a specific summary option and example indices.

        NOTE(review): if attn_map_option matches no model name and is not
        "delta", `tokens`/`attns` are never bound and the return raises
        UnboundLocalError — callers appear to always pass a valid option;
        confirm before relying on this elsewhere.
        """
        # Collect attentions and tokens for each model
        for model_id, model_name in zip(self.model_ids, self.model_names):
            if attn_map_option == model_name:
                tokens = self.map_model_to_collateral[model_id][example_id]["tokens"]
                attns = self.map_model_to_collateral[model_id][example_id]["attentions"]
        # Compute attention delta if comparing two models
        if attn_map_option == "delta":
            tokens = self.map_model_to_collateral["model1"][example_id]["tokens"]
            attns1 = np.stack(self.map_model_to_collateral["model1"][example_id]["attentions"])
            attns2 = np.stack(self.map_model_to_collateral["model2"][example_id]["attentions"])
            attns = attns1 - attns2
        attns, tokens = self._unpad_tokens(attns, tokens)
        return attns, tokens
    def get_head_matrix(self, head_matrix_option: str, example_ids: List[int]) -> np.array:
        """Get summary plot matrix for a task-AGNOSTIC summary option and example indices."""
        z = None
        attentions_dict = {}
        # Compute std(attn) if given the corresponding option
        for model_id, model_name in zip(self.model_ids, self.model_names):
            if head_matrix_option == f"{model_name}_std":
                attns = [
                    np.stack(self.map_model_to_collateral[model_id][example_id]["attentions"])
                    for example_id in example_ids
                ]
                attentions_dict[model_id] = attns
                z = np.mean([attn.std(axis=(2, 3)) for attn in attns], axis=0)
        # Compute std(attn2 - attn1) if given the corresponding option
        # NOTE(review): attentions_dict is only populated when
        # head_matrix_option equals "<model>_std", so for "delta_std" the
        # lookups below appear to raise KeyError — confirm the app's call
        # order / intended behavior.
        if self.mode == "compare":
            if head_matrix_option == "delta_std":
                attns1 = attentions_dict["model1"]
                attns2 = attentions_dict["model2"]
                attns_delta = [attn2 - attn1 for (attn1, attn2) in zip(attns1, attns2)]
                z = np.mean([attn.std(axis=(2, 3)) for attn in attns_delta], axis=0)
        # If head_matrix_option not task-agnostic, fall back to the subclass hook
        if z is None:
            z = self.get_specific_head_matrix(head_matrix_option, example_ids)
        assert z is not None, "z is None"
        return z
    def get_specific_head_matrix(self, head_matrix_option: str, example_ids: List[int]) -> np.array:
        """Get summary plot matrix for a task-SPECIFIC summary options and example indices (e.g. custom metrics)."""
        return None
    def get_name(self) -> str:
        """Returns name of task, as defined by subclass."""
        return self.name
    def get_df_col_to_plot(self) -> str:
        """Returns the DataFrame column to be used for the t-SNE plot, as defined by subclass."""
        return self.df_col_to_plot
    @abstractmethod
    def get_summary_table(self, saved_click: pd.DataFrame) -> Dict[str, Union[str, int]]:
        """Updates summary table with values associated with the saved click."""
        pass
    def _unpad_tokens(self, attns: np.array, tokens: List[str]) -> Tuple[np.array, List[str]]:
        """Unpad tokens if we need to (identity by default; subclasses may override)."""
        return attns, tokens
class WSCTask(Task):
    """Task for the Winograd Schema Challenge (WSC) coreference dataset.

    The t-SNE view plots coreferent span tokens; the head-matrix view exposes
    coreference-intensity metrics per attention head.
    """

    def __init__(self, *args, **kwargs):
        super(WSCTask, self).__init__(*args, **kwargs)
        self.name = "wsc"
        self.df_col_to_plot = "span_token"

    def _init_tsne_layout(self) -> List[str]:
        """Get layout options for the tsne plot."""
        tsne_internal_options = ["acc", "tp", "pred", "target"]
        tsne_displayed_options = ["acc_coloring", "tp_coloring", "pred_coloring", "target_coloring"]
        tsne_plot_description = "Plotting Coreferent Span Tokens"
        return tsne_internal_options, tsne_displayed_options, tsne_plot_description

    def _add_head_matrix_options(self, head_matrix_options: List[str]) -> List[str]:
        """Add coreference intensity and head-accuracy metrics for WSC."""
        for model_name in self.model_names:
            head_matrix_options += [
                f"{model_name}_coreference_intensity",
                f"{model_name}_accuracy_based_on_head",
            ]
        if self.mode == "compare":
            head_matrix_options += ["coreference_intensity_delta"]
        return head_matrix_options

    def _init_table_cols_layout(self) -> List[str]:
        """Get column labels for the summary table for WSC."""
        table_cols = ["span1", "span2", "target"]
        for model_name in self.model_names:
            table_cols += [f"pred: {model_name}", f"acc: {model_name}"]
        return table_cols

    def get_val_to_label_map(self, dropdown_color_option: str) -> Dict[float, str]:
        """Map the ternary WSC values to display labels (same for every color option)."""
        return {0.0: "False/No", 0.5: "Ambiguous", 1.0: "True/Yes"}

    def _preprocess_df(self, df: pd.DataFrame) -> pd.DataFrame:
        """Drop repetitive tokens and ternarize the plotting columns."""
        new_df = df.sort_values("target", axis="rows", ascending=False)
        new_df = new_df.drop_duplicates(["aggr_layer_12_tsne_x", "aggr_layer_12_tsne_y"])
        # Anything that is not exactly 0 or 1 is collapsed to 0.5 ("Ambiguous").
        new_df[self.tsne_displayed_options] = new_df[self.tsne_displayed_options].applymap(
            lambda x: x if (x == 0 or x == 1) else 0.5
        )
        return new_df

    def get_tsne_rows(
        self, saved_click: Dict[str, pd.DataFrame], model_selector_val: str
    ) -> Tuple[pd.DataFrame, pd.DataFrame]:
        """Get rows from the tsne dataframe based on click."""
        tsne_df = self.map_model_to_df[self.map_model_name_to_id[model_selector_val]]
        selected_sentence = saved_click["sentence"]  # Getting sentence idx
        selected_rows = tsne_df.merge(  # Getting rows in df corresponding to selected sentence
            selected_sentence, on=["sentence"]
        )
        # NOTE(review): "id" is filtered against a value taken from the
        # "sentence" column — confirm both columns share the same value space.
        other_rows = tsne_df[tsne_df["id"] != selected_sentence.iloc[0]]
        return other_rows, selected_rows

    def get_specific_head_matrix(self, head_matrix_option: str, example_ids: List[int]) -> np.array:
        """Get summary plot matrix for a WSC-SPECIFIC summary options and example indices."""
        z = None
        for model_id, model_name in zip(self.model_ids, self.model_names):
            selected_attentions_spans = [
                np.stack(self.map_model_to_collateral[model_id][index]["attention_spans"])
                for index in example_ids
            ]  # to collect multiple attention matrices (from different examples)
            if head_matrix_option == f"{model_name}_coreference_intensity":
                z = np.mean(
                    [attn.max(axis=(2, 3)) for attn in selected_attentions_spans], axis=0
                )  # when more than 1 token in span> select max of them
            elif head_matrix_option == f"{model_name}_accuracy_based_on_head":
                # NOTE(review): only the first selected example's acc_matrix is
                # used here — confirm that is intentional.
                z = self.map_model_to_collateral[model_id][example_ids[0]]["acc_matrix"]
        if self.mode == "compare":
            selected_attentions_spans1 = [
                np.stack(self.map_model_to_collateral["model1"][index]["attention_spans"])
                for index in example_ids
            ]  # to collect multiple attention matrices (from different examples)
            selected_attentions_spans2 = [
                np.stack(self.map_model_to_collateral["model2"][index]["attention_spans"])
                for index in example_ids
            ]  # to collect multiple attention matrices (from different examples)
            if head_matrix_option == "coreference_intensity_delta":
                span_weights_delta = [
                    attentions_spans2.max(axis=(2, 3)) - attentions_spans1.max(axis=(2, 3))
                    for (attentions_spans1, attentions_spans2) in zip(
                        selected_attentions_spans1, selected_attentions_spans2
                    )
                ]
                z = np.mean(span_weights_delta, axis=0)
        return z

    def _unpad_tokens(self, attns: np.array, tokens: List[str]) -> Tuple[np.array, List[str]]:
        """Removing [PAD] tokens."""
        # Making sure we don't get any [PAD] tokens
        attns = attns[:, :, : len(tokens), : len(tokens)]
        # TODO: figure out why we only need this for superglue and not ABSA
        return attns, tokens

    def get_dist_per_layer(
        self, option: str, model_selector_val: str, selected_sentences: pd.DataFrame
    ) -> Tuple[np.array, np.array]:
        """Computing the mean distance per layer between span tokens for target/pred == 1 and target/pred == 0."""
        # Getting example rows and sentence rows (minus example rows) in df
        curr_model_full_df = self.map_model_to_full_df[
            self.map_model_name_to_id[model_selector_val]
        ]
        selected_sentence_rows = curr_model_full_df.loc[
            curr_model_full_df["sentence_idx"].isin(selected_sentences)
        ]
        all_ex_ids = selected_sentence_rows["id"].unique()
        span_agg_distance_0 = np.zeros(self.num_layers + 1)
        span_agg_distance_1 = np.zeros(self.num_layers + 1)
        count_0 = 0
        count_1 = 0
        for ex_id in all_ex_ids:
            ex_rows = selected_sentence_rows[selected_sentence_rows["id"] == ex_id]
            span1 = ex_rows["span1"].iloc[0]
            span2 = ex_rows["span2"].iloc[0]
            span1_rows = ex_rows[ex_rows["span"] == span1]
            span2_rows = ex_rows[ex_rows["span"] == span2]
            # Mean 2-D t-SNE coordinate of each span, per layer
            span1_coords = [
                np.array(
                    (
                        span1_rows[f"layer_{layer:02}_tsne_x"].mean(),
                        span1_rows[f"layer_{layer:02}_tsne_y"].mean(),
                    )
                )
                for layer in range(self.num_layers + 1)
            ]
            span2_coords = [
                np.array(
                    (
                        span2_rows[f"layer_{layer:02}_tsne_x"].mean(),
                        # BUG FIX: the y-coordinate previously read span1_rows,
                        # so span2's y was always span1's y.
                        span2_rows[f"layer_{layer:02}_tsne_y"].mean(),
                    )
                )
                for layer in range(self.num_layers + 1)
            ]
            dist_per_layer = np.array(
                [
                    np.linalg.norm(span1_coord - span2_coord)
                    for span1_coord, span2_coord in zip(span1_coords, span2_coords)
                ]
            )
            if ex_rows[option].iloc[0] == 1:
                span_agg_distance_1 += dist_per_layer
                count_1 += 1
            else:
                span_agg_distance_0 += dist_per_layer
                count_0 += 1
        # Averaging by number of examples
        # NOTE(review): divides by zero (yielding nan/inf) when one class has
        # no examples in the selection — confirm callers tolerate that.
        span_mean_distance_0 = span_agg_distance_0 / count_0
        span_mean_distance_1 = span_agg_distance_1 / count_1
        return span_mean_distance_0, span_mean_distance_1

    def get_summary_table(self, saved_click: pd.DataFrame) -> Dict[str, Union[str, int]]:
        """Updates summary table with values associated with the saved click."""
        selected_sentence = saved_click["sentence"]
        cols = ["span1", "span2", "acc", "pred", "target"]
        models_sentence_df = {}
        for model_id, model_name in zip(self.model_ids, self.model_names):
            sentence_df = self.map_model_to_df[model_id]
            sentence_df = sentence_df[sentence_df["sentence"] == selected_sentence]
            sentence_df = sentence_df[cols]
            sentence_df = sentence_df.drop_duplicates()
            sentence_df.columns = [
                "span1",
                "span2",
                f"acc: {model_name}",
                f"pred: {model_name}",
                f"target: {model_name}",
            ]
            models_sentence_df[model_id] = sentence_df
        if self.mode == "normal":
            # BUG FIX: the per-model frame names its target column
            # "target: <model>", but the final column selection below expects
            # a plain "target" column — rename it here the same way the
            # compare branch does; previously this branch raised KeyError.
            sentence_df = models_sentence_df["model1"].rename(
                {f"target: {self.map_model_id_to_name['model1']}": "target"}, axis="columns"
            )
        elif self.mode == "compare":
            sentence_df = models_sentence_df["model1"].merge(
                models_sentence_df["model2"], on=["span1", "span2"]
            )
            # Both models share the same target; keep a single "target" column.
            sentence_df = sentence_df.drop(
                f"target: {self.map_model_id_to_name['model2']}", axis="columns"
            )
            sentence_df = sentence_df.rename(
                {f"target: {self.map_model_id_to_name['model1']}": "target"}, axis="columns"
            )
        cols = [
            "span1",
            "span2",
            "target",
            f"pred: {self.map_model_id_to_name['model1']}",
            f"acc: {self.map_model_id_to_name['model1']}",
        ]
        if self.mode == "compare":
            cols += [
                f"pred: {self.map_model_id_to_name['model2']}",
                f"acc: {self.map_model_id_to_name['model2']}",
            ]
        sentence_df = sentence_df[cols]
        return sentence_df.to_dict("records")
class ABSATask(Task):
    """Task for Aspect-Based Sentiment Analysis (ABSA).

    The t-SNE view plots aspect tokens colored by domain or per-model F1;
    the head-matrix view exposes a grammar-correlation metric per head.
    """

    def __init__(self, *args, **kwargs):
        super(ABSATask, self).__init__(*args, **kwargs)
        self.name = "absa"
        self.df_col_to_plot = "aspect"

    def _init_tsne_layout(self) -> List[str]:
        """Get layout options for the tsne plot."""
        tsne_internal_options = ["domain"]
        for model_name in self.model_names:
            tsne_internal_options += [f"f1_{model_name}"]
        if self.mode == "compare":
            tsne_internal_options += ["f1_delta"]
        tsne_plot_description = "Plotting Aspect Tokens"
        tsne_displayed_options = tsne_internal_options
        return tsne_internal_options, tsne_displayed_options, tsne_plot_description

    def _add_head_matrix_options(self, head_matrix_options: List[str]) -> List[str]:
        """Add grammar correlation option for ABSA."""
        for model_name in self.model_names:
            head_matrix_options += [f"{model_name}_grammar_correlation"]
        if self.mode == "compare":
            head_matrix_options += ["grammar_correlation_delta"]
        return head_matrix_options

    def _init_table_cols_layout(self) -> List[str]:
        """Get column labels for the summary table for ABSA."""
        table_cols = ["words", "target"]
        for model_name in self.model_names:
            table_cols += [f"pred: {model_name}"]
        return table_cols

    def get_val_to_label_map(self, dropdown_color_option: str) -> Dict[float, str]:
        """Map numeric domain codes back to domain names; only "domain" is categorical."""
        return (
            {v: k for k, v in self.domain_map.items()}
            if dropdown_color_option == "domain"
            else None
        )

    def _preprocess_df(self, df: pd.DataFrame) -> pd.DataFrame:
        """ABSA needs no per-model preprocessing; return the frame unchanged."""
        return df

    def _process_all_df(self, dfs: Dict[str, pd.DataFrame]) -> Dict[str, pd.DataFrame]:
        """Adds additional information to input DataFrames.

        In compare mode, adds prediction and f1 information from model1's
        frame to model2's frame and vice versa, and computes the f1 deltas.
        In both modes, maps domain names to numeric codes via self.domain_map.

        Args:
            dfs: Mapping of model id ("model1"/"model2") to its DataFrame.

        Returns:
            Mapping of model id to the processed DataFrame.
        """
        if self.mode == "normal":
            (model1_df,) = dfs.values()
            model1_df = model1_df.rename(
                columns={
                    "f1": f"f1_{self.map_model_id_to_name['model1']}",
                    "pred": f"pred_{self.map_model_id_to_name['model1']}",
                }
            )
            # Creating numeric mapping for domain labels in dataframe
            domains = model1_df["domain"].unique()
            self.domain_map = {domain: val for val, domain in enumerate(domains)}
            model1_df["domain"] = model1_df["domain"].map(self.domain_map)
            new_df = {}
            new_df["model1"] = model1_df
        else:
            # TODO: Merge both DataFrames into one
            model1_df, model2_df = dfs.values()
            model1_df = model1_df.rename(
                columns={
                    "f1": f"f1_{self.map_model_id_to_name['model1']}",
                    "pred": f"pred_{self.map_model_id_to_name['model1']}",
                }
            )
            model2_df = model2_df.rename(
                columns={
                    "f1": f"f1_{self.map_model_id_to_name['model2']}",
                    "pred": f"pred_{self.map_model_id_to_name['model2']}",
                }
            )
            # Need aspect key for merge for case where aspect get truncated (problem when using pivot phrase due to 64 max seq length truncation)
            model1_full_df = model1_df.merge(
                model2_df[
                    [
                        "aspect",
                        "id",
                        f"f1_{self.map_model_id_to_name['model2']}",
                        f"pred_{self.map_model_id_to_name['model2']}",
                    ]
                ],
                on=["id", "aspect"],
                how="inner",
            )
            model2_full_df = model2_df.merge(
                model1_df[
                    [
                        "aspect",
                        "id",
                        f"f1_{self.map_model_id_to_name['model1']}",
                        f"pred_{self.map_model_id_to_name['model1']}",
                    ]
                ],
                on=["id", "aspect"],
                how="inner",
            )
            # f1_delta is signed from each model's own point of view.
            model1_full_df["f1_delta"] = (
                model1_full_df[f"f1_{self.map_model_id_to_name['model1']}"]
                - model1_full_df[f"f1_{self.map_model_id_to_name['model2']}"]
            )
            model2_full_df["f1_delta"] = (
                model2_full_df[f"f1_{self.map_model_id_to_name['model2']}"]
                - model2_full_df[f"f1_{self.map_model_id_to_name['model1']}"]
            )
            model1_full_df = model1_full_df.drop_duplicates()
            model2_full_df = model2_full_df.drop_duplicates()
            # Creating numeric mapping for domain labels in dataframe
            domains = pd.concat([model1_full_df["domain"], model2_full_df["domain"]]).unique()
            self.domain_map = {domain: val for val, domain in enumerate(domains)}
            model1_full_df["domain"] = model1_full_df["domain"].map(self.domain_map)
            model2_full_df["domain"] = model2_full_df["domain"].map(self.domain_map)
            new_df = {}
            new_df["model1"] = model1_full_df
            new_df["model2"] = model2_full_df
        return new_df

    def get_tsne_rows(
        self, saved_click: Dict[str, pd.DataFrame], model_selector_val: str
    ) -> Tuple[pd.DataFrame, pd.DataFrame]:
        """Get rows from the tsne dataframe based on click."""
        tsne_df = self.map_model_to_df[self.map_model_name_to_id[model_selector_val]]
        # Getting example matching id and aspect
        selected_ex = saved_click[["aspect", "id"]]
        selected_rows = tsne_df.merge(selected_ex, on=["aspect", "id"])
        # NOTE(review): unlike WSC, the selected rows are not excluded from
        # other_rows here — confirm that is intentional.
        other_rows = tsne_df
        return other_rows, selected_rows

    def get_specific_head_matrix(self, head_matrix_option: str, example_ids: List[int]) -> np.array:
        """Get summary plot matrix for a ABSA-SPECIFIC summary options and example indices."""
        z = None
        for model_id, model_name in zip(self.model_ids, self.model_names):
            if head_matrix_option == f"{model_name}_grammar_correlation":
                # BUG FIX: collaterals are keyed by model id everywhere else
                # in this file; this lookup previously used model_name and
                # raised KeyError whenever display names differ from ids.
                grammar_correlations = [
                    self.map_model_to_collateral[model_id][index]["grammar_matrices"]
                    for index in example_ids
                ]
                z = np.mean(grammar_correlations, axis=0)
        if self.mode == "compare":
            grammar_correlations1 = [
                self.map_model_to_collateral["model1"][index]["grammar_matrices"]
                for index in example_ids
            ]
            grammar_correlations2 = [
                self.map_model_to_collateral["model2"][index]["grammar_matrices"]
                for index in example_ids
            ]
            # BUG FIX: this option is registered as "grammar_correlation_delta"
            # in _add_head_matrix_options; the old comparison against
            # "gramar_correlation_delta" (typo) never matched.
            if head_matrix_option == "grammar_correlation_delta":
                grammar_correlations_delta = [
                    grammar_correlation2 - grammar_correlation1
                    for (grammar_correlation1, grammar_correlation2) in zip(
                        grammar_correlations1, grammar_correlations2
                    )
                ]
                z = np.mean(grammar_correlations_delta, axis=0)
        return z

    def get_summary_table(self, saved_click: pd.DataFrame) -> Dict[str, Union[str, int]]:
        """Updates summary table with values associated with the saved click.

        Splits the whitespace-delimited sentence/target/prediction strings
        into per-token table rows.
        """
        if self.mode == "normal":
            cols = ["sentence", "target", f"pred_{self.map_model_id_to_name['model1']}"]
            sentence_df = pd.DataFrame([saved_click[el].split(" ") for el in cols]).T
            sentence_df.columns = [
                "words",
                "target",
                f"pred: {self.map_model_id_to_name['model1']}",
            ]
        elif self.mode == "compare":
            cols = [
                "sentence",
                "target",
                f"pred_{self.map_model_id_to_name['model1']}",
                f"pred_{self.map_model_id_to_name['model2']}",
            ]
            sentence_df = pd.DataFrame([saved_click[el].split(" ") for el in cols]).T
            sentence_df.columns = [
                "words",
                "target",
                f"pred: {self.map_model_id_to_name['model1']}",
                f"pred: {self.map_model_id_to_name['model2']}",
            ]
        return sentence_df.to_dict("records")
def get_task(
    task_name: str,
    num_layers: int,
    model_names: List[str],
    collaterals_paths: List[str],
    df_paths: List[str],
) -> "Task":
    """Gets a task for the app.

    Args:
        task_name: Either "absa" or "wsc".
        num_layers: Number of transformer layers in the models.
        model_names: Display names for the model(s).
        collaterals_paths: One path (normal mode) or two paths (compare mode).
        df_paths: One/two DataFrame csv paths, matching collaterals_paths.

    Returns:
        The constructed Task subclass instance.

    Raises:
        ValueError: If the number of paths is not 1 or 2, or task_name is unknown.
    """
    if len(collaterals_paths) == 1:
        mode = "normal"
        # BUG FIX: previously re-asserted len(collaterals_paths) == 1 (a
        # tautology inside this branch) instead of validating df_paths.
        assert len(df_paths) == 1, "Provided wrong number of csv"
    elif len(collaterals_paths) == 2:
        mode = "compare"
        assert len(df_paths) == 2, "Provided wrong number of csv"
    else:
        # BUG FIX: previously fell through with `mode` unbound and crashed
        # with UnboundLocalError at the print below.
        raise ValueError(
            f"Expected 1 or 2 collateral paths, got {len(collaterals_paths)}"
        )
    print(f"Starting app in '{mode}' mode for '{task_name}'")
    if task_name == "absa":
        return ABSATask(mode, num_layers, model_names, collaterals_paths, df_paths)
    elif task_name == "wsc":
        return WSCTask(mode, num_layers, model_names, collaterals_paths, df_paths)
    # BUG FIX: previously returned None implicitly for unknown task names.
    raise ValueError(f"Unknown task name: {task_name!r}")
| 30,547 | 42.953957 | 145 | py |
nlp-architect | nlp-architect-master/docs-source/source/conf.py | # -*- coding: utf-8 -*-
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
# flake8: noqa
import os
import sys
import sphinx_rtd_theme # noqa: E402
from sphinx.ext import apidoc
from nlp_architect.version import NLP_ARCHITECT_VERSION
# -- Options for HTML output ----------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.napoleon',
    'sphinx.ext.doctest',
    # 'sphinx.ext.intersphinx',
    # 'sphinx.ext.todo',
    # 'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    # 'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx_rtd_theme',
]
# Autodoc settings
# autodoc_default_flags = ['members', 'undoc-members', 'inherited-members']
# Autosummary settings
autosummary_generate = True
# Napoleon settings (used to parse google and numpy style docstrings)
napoleon_google_docstring = True
napoleon_numpy_docstring = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'NLP Architect by Intel® AI Lab'
copyright = u'NLP Architect by Intel® AI Lab is a trademarks of Intel Corporation or its subsidiaries \
in the U.S. and/or other countries. * Other names and brands may be claimed as \
the property of others.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y.Z version.
version = NLP_ARCHITECT_VERSION
# The full version, including git-hash and alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# html_theme_path is not needed: listing 'sphinx_rtd_theme' in `extensions`
# above registers the theme with Sphinx.
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {
    # 'canonical_url': '',
    # 'analytics_id': '',
    'logo_only': True,
    'display_version': False,
    # 'prev_next_buttons_location': 'bottom',
    'prev_next_buttons_location': None,
    'style_external_links': False,
    # 'vcs_pageview_mode': '',
    # Toc options
    'collapse_navigation': True,
    'sticky_navigation': True,
    'navigation_depth': 4,
    'includehidden': True,
    'titles_only': False
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'assets/logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = ''
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static/']
html_css_files = [
    'nlp_arch_theme.css',
    # 'https://fonts.googleapis.com/css?family=Lato',
    # 'https://fonts.googleapis.com/css?family=Oswald',
    'https://fonts.googleapis.com/css?family=Roboto+Mono',
    'https://fonts.googleapis.com/css?family=Open+Sans:100,900'
]
html_js_files = [
    'install.js'
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'intelaidoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [('index', u'NLP Architect Documentation',
                    u'Intel Corporation', 'manual'), ]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', u'NLP Architect Documentation',
     [u'Intel'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# directory menu entry, description, category)
# NOTE(review): this tuple has fewer fields than the 7 listed above —
# confirm the Texinfo builder accepts the short form before relying on it.
texinfo_documents = [('index', u'NLP Architect Documentation',
                      u'Intel Corporation'), ]
# Example configuration for intersphinx: refer to the Python standard library.
# NOTE(review): 'sphinx.ext.intersphinx' is commented out in `extensions`
# above, so this mapping is currently inert.
intersphinx_mapping = {
    'python': ('http://docs.python.org/', None),
    'numpy': ('http://docs.scipy.org/doc/numpy/', None),
}
# These go in every file
rst_epilog = """
.. include :: <isonum.txt>
.. |ngraph| replace:: ngraph
.. |NLP-Architect| replace:: NLP Architect
.. |Geon| replace:: Nervana Graph
.. |TF| replace:: TensorFlow\ |trade|
"""
def run_apidoc(_):
    """Regenerate the API .rst stubs with sphinx-apidoc before each build.

    Connected to the 'builder-inited' event in setup(); the event argument
    is unused.
    """
    api_docs = os.path.join(os.path.abspath("./source/"), "generated_api")
    apidoc.main(["-f", "-o", api_docs, os.path.abspath("../nlp_architect/")])
    # Drop the auto-generated index pages; the toctrees reference modules directly.
    for stub in ("modules.rst", "nlp_architect.rst"):
        os.remove(os.path.join(api_docs, stub))
def setup(app):
    """Sphinx extension entry point: hook API-doc generation into the build."""
    app.connect("builder-inited", run_apidoc)
| 9,982 | 32.166113 | 103 | py |
nlp-architect | nlp-architect-master/tests/test_quantization.py | # ******************************************************************************
# Copyright 2017-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import unittest
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from nlp_architect.nn.torch.quantization import (
FakeLinearQuantizationWithSTE,
QuantizedLinear,
get_dynamic_scale,
get_scale,
QuantizedEmbedding,
)
def fake_quantize_np(x, scale, bits):
    """Reference quantize-dequantize: integer-quantize x, then rescale back."""
    quantized = quantize_np(x, scale, bits)
    return quantized / scale
def quantize_np(x, scale, bits):
    """Scale, round, and saturate x to the symmetric signed `bits`-bit range."""
    limit = 2 ** (bits - 1) - 1
    return np.clip(np.round(x * scale), -limit, limit)
class FakeLinearQuantizationWithSTETester(unittest.TestCase):
    """Tests for the straight-through-estimator (STE) fake quantization op."""

    def test_quantization_forward(self):
        """Forward pass must match the NumPy reference implementation."""
        quantize = FakeLinearQuantizationWithSTE().apply
        inputs = torch.randn(1, 100)
        scale = (2 ** (8 - 1) - 1) / np.abs(inputs).max()
        expected = fake_quantize_np(inputs, scale, 8)
        self.assertTrue((quantize(inputs, scale, 8) == expected).all())

    def test_quantization_backward(self):
        """STE must pass gradients through the rounding unchanged."""
        quantize = FakeLinearQuantizationWithSTE().apply
        inputs = torch.randn(1, 100, requires_grad=True)
        with torch.no_grad():
            scale = (2 ** (8 - 1) - 1) / inputs.abs().max()
        loss = torch.sum(quantize(inputs, scale, 8))
        loss.backward()
        self.assertTrue((inputs.grad == torch.ones_like(inputs)).all())
class QuantizedLinearTest(unittest.TestCase):
def test_dynamic_quantized_linear_forward(self):
"""Test QuantizedLinear forward method by giving in the input and
weight values that are already quantized, therefore the quantization
step should have no effect on the values and we know what values
are expected"""
x = torch.randn(1, 100).mul(127.0).round().clamp(-127.0, 127.0)
qlinear = QuantizedLinear(100, 1, bias=False, requantize_output=False, mode="dynamic")
with torch.no_grad():
scale = 127.0 / qlinear.weight.abs().max()
self.assertTrue(
(
qlinear.fake_quantized_weight == fake_quantize_np(qlinear.weight.detach(), scale, 8)
).all()
)
qlinear.weight.data = (
torch.randn_like(qlinear.weight).mul(127.0).round().clamp(-127.0, 127.0)
)
y = qlinear(x)
self.assertEqual(y.shape, (1, 1))
self.assertTrue((y == (x @ qlinear.weight.t())).all())
def test_static_quantized_inference(self):
qlinear = QuantizedLinear(10, 5, mode="EMA")
weight = qlinear.weight.data.detach()
weight_scale = get_dynamic_scale(weight, 8)
weight_int = quantize_np(weight, weight_scale, 8)
self.assertTrue((weight_int == torch.round(weight_int)).all())
self.assertTrue(weight_int.abs().max() <= 127)
x = torch.randn(3, 10) * 2 ** 0.5 - 0.36
x_thresh = 3.0
output_thresh = 2.3
output_scale = 127.0 / output_thresh
x_scale = 127.0 / x_thresh
qlinear.input_thresh = torch.tensor(x_thresh)
qlinear.output_thresh = torch.tensor(output_thresh)
x_int = quantize_np(x, x_scale, 8)
self.assertTrue((x_int == torch.round(x_int)).all())
self.assertTrue(x_int.abs().max() <= 127)
bias = qlinear.bias.data
bias_scale = x_scale * weight_scale
bias_int = quantize_np(bias, bias_scale, 32)
self.assertTrue((bias_int == torch.round(bias_int)).all())
self.assertTrue(bias_int.abs().max() <= 2 ** (32 - 1) - 1)
output_int = x_int @ weight_int.t() + bias_int
output_int = torch.clamp(output_int, -(2 ** (32 - 1) - 1), 2 ** (32 - 1) - 1)
output = torch.round(output_int / bias_scale * output_scale).clamp(-127, 127) / output_scale
qlinear.eval()
qlinear_output = qlinear(x)
self.assertTrue((qlinear_output - output).norm() < 10 ** -6)
def test_ema_quantization(self):
ema_decay = 0.9
qlinear = QuantizedLinear(10, 5, bias=False, ema_decay=ema_decay, mode="EMA")
for i in range(5):
x = torch.randn(3, 10)
tmp_input_thresh = x.abs().max()
if i == 0:
input_ema = tmp_input_thresh
else:
input_ema -= (1 - ema_decay) * (input_ema - tmp_input_thresh)
y = (
fake_quantize_np(x, get_scale(8, input_ema), 8) @ qlinear.fake_quantized_weight.t()
).detach()
tmp_output_thresh = y.abs().max()
if i == 0:
output_ema = tmp_output_thresh
else:
output_ema -= (1 - ema_decay) * (output_ema - tmp_output_thresh)
y = fake_quantize_np(y, get_scale(8, output_ema), 8)
y_hat = qlinear(x)
self.assertTrue((y == y_hat).all())
self.assertEqual(qlinear.input_thresh, input_ema)
self.assertEqual(qlinear.output_thresh, output_ema)
def test_ema_quantization_data_parallel(self):
if not torch.cuda.is_available() or torch.cuda.device_count() <= 1:
return
ema_decay = 0.9
fake_quantize = FakeLinearQuantizationWithSTE().apply
qlinear = nn.DataParallel(
QuantizedLinear(10, 5, bias=False, ema_decay=ema_decay, mode="EMA")
).cuda()
for i in range(5):
x = torch.randn(2, 10).cuda()
tmp_input_thresh = x[0].abs().max()
if i == 0:
input_ema = tmp_input_thresh
else:
input_ema -= (1 - ema_decay) * (input_ema - tmp_input_thresh)
y = (
fake_quantize(x, get_scale(8, input_ema), 8)
@ qlinear.module.fake_quantized_weight.t()
).detach()
tmp_output_thresh = y[0].abs().max()
if i == 0:
output_ema = tmp_output_thresh
else:
output_ema -= (1 - ema_decay) * (output_ema - tmp_output_thresh)
qlinear(x)
self.assertEqual(qlinear.module.input_thresh, input_ema)
self.assertEqual(qlinear.module.output_thresh, output_ema)
    def test_start_quantization_delay(self):
        """With start_step=N the layer behaves like a plain nn.Linear for the
        first N forward passes and starts quantizing on pass N+1."""
        quantization_delay = 2
        qlinear = QuantizedLinear(10, 5, start_step=quantization_delay, mode="DYNAMIC")
        linear = nn.Linear(10, 5)
        # Mirror weights/bias so pre-quantization outputs are bit-identical.
        linear.weight.data = qlinear.weight
        linear.bias.data = qlinear.bias
        for _ in range(quantization_delay):
            x = torch.randn(3, 10)
            qy = qlinear(x)
            y = linear(x)
            self.assertTrue((y == qy).all())
        # First post-delay pass: quantization noise must make outputs differ.
        qy = qlinear(x)
        self.assertFalse((y == qy).all())
    def test_start_quantization_delay_data_parallel(self):
        """Same as test_start_quantization_delay, but wrapped in nn.DataParallel
        (skipped when CUDA is unavailable)."""
        if not torch.cuda.is_available():
            return
        quantization_delay = 2
        qlinear = QuantizedLinear(10, 5, start_step=quantization_delay, mode="DYNAMIC")
        linear = nn.Linear(10, 5)
        # Mirror weights/bias before wrapping so outputs match pre-quantization.
        linear.weight.data = qlinear.weight
        linear.bias.data = qlinear.bias
        qlinear = nn.DataParallel(qlinear).cuda()
        linear = nn.DataParallel(linear).cuda()
        for _ in range(quantization_delay):
            x = torch.randn(3, 10).cuda()
            qy = qlinear(x)
            y = linear(x)
            self.assertTrue((y == qy).all())
        # After the delay the quantized path kicks in and outputs diverge.
        qy = qlinear(x)
        self.assertFalse((y == qy).all())
    def test_dynamic_quantized_linear_backward(self):
        """STE backward: d(out)/d(x) is the fake-quantized weight, and the
        weight gradient is the fake-quantized input."""
        x = torch.randn(1, 100, requires_grad=True)
        linear = QuantizedLinear(100, 1, bias=False, mode="DYNAMIC")
        y = linear(x)
        y.backward()
        self.assertTrue((x.grad == linear.fake_quantized_weight).all())
        with torch.no_grad():
            # Dynamic activation scale as computed inside the layer.
            scale = (2 ** (8 - 1) - 1) / x.abs().max()
        self.assertTrue((fake_quantize_np(x.detach(), scale, 8) == linear.weight.grad).all())
    def test_training_and_inference_differences_ema(self):
        """EMA mode: simulated (training) and integer (eval) forward passes
        must agree to within float tolerance on the same input."""
        qlinear = QuantizedLinear(10, 5, mode="EMA", bias=False)
        x = torch.randn(3, 10) * 2 + 0.1
        y = qlinear(x)
        qlinear.eval()
        y_hat = qlinear(x)
        self.assertTrue((y - y_hat).norm() < 1e-6)
    def test_training_and_inference_differences_dynamic(self):
        """Dynamic mode (the default): training and eval forward passes must
        agree to within float tolerance on the same input."""
        qlinear = QuantizedLinear(10, 5, bias=False)
        x = torch.randn(3, 10) * 2 + 0.1
        y = qlinear(x)
        qlinear.eval()
        y_hat = qlinear(x)
        self.assertTrue((y - y_hat).norm() < 1e-6)
    def test_none_quantized_linear(self):
        """mode='NONE' must behave exactly like a plain nn.Linear."""
        qlinear = QuantizedLinear(10, 5, mode="NONE")
        linear = nn.Linear(10, 5)
        linear.weight.data = qlinear.weight
        linear.bias.data = qlinear.bias
        x = torch.randn(3, 10)
        y = linear(x)
        y_hat = qlinear(x)
        self.assertTrue((y - y_hat).norm() < 1e-6)
    def test_export_to_8bit_with_bias(self):
        """mode_8bit toggles which tensors appear in state_dict(): float
        weight/bias normally, int8 weight + int32 bias (+ bias_scale) when on."""
        qlinear = QuantizedLinear(10, 5, mode="EMA")
        qlinear.eval()
        state_dict = qlinear.state_dict()
        # Default export: float tensors only.
        self.assertTrue("weight" in state_dict)
        self.assertTrue("bias" in state_dict)
        self.assertTrue("quantized_weight" not in state_dict)
        self.assertTrue("_quantized_bias" not in state_dict)
        self.assertTrue("bias_scale" not in state_dict)
        qlinear.mode_8bit = True
        state_dict = qlinear.state_dict()
        # 8bit export: only the quantized tensors, in narrow integer dtypes.
        self.assertTrue("weight" not in state_dict)
        self.assertTrue("bias" not in state_dict)
        self.assertTrue("quantized_weight" in state_dict)
        self.assertTrue(state_dict["quantized_weight"].dtype == torch.int8)
        self.assertTrue("_quantized_bias" in state_dict)
        self.assertTrue(state_dict["_quantized_bias"].dtype == torch.int32)
        self.assertTrue("bias_scale" in state_dict)
        qlinear.mode_8bit = False
        state_dict = qlinear.state_dict()
        # Toggling back must restore the float-only export.
        self.assertTrue("weight" in state_dict)
        self.assertTrue("bias" in state_dict)
        self.assertTrue("quantized_weight" not in state_dict)
        self.assertTrue("_quantized_bias" not in state_dict)
        self.assertTrue("bias_scale" not in state_dict)
    def test_export_to_8bit_without_bias(self):
        """8bit export of a bias-free layer must contain only the int8 weight —
        no bias, quantized bias, or bias scale entries."""
        qlinear = QuantizedLinear(10, 5, bias=False, mode="EMA")
        qlinear.eval()
        qlinear.mode_8bit = True
        state_dict = qlinear.state_dict()
        self.assertTrue("weight" not in state_dict)
        self.assertTrue("bias" not in state_dict)
        self.assertTrue("quantized_weight" in state_dict)
        self.assertTrue(state_dict["quantized_weight"].dtype == torch.int8)
        self.assertTrue("_quantized_bias" not in state_dict)
        self.assertTrue("bias_scale" not in state_dict)
        qlinear.mode_8bit = False
        state_dict = qlinear.state_dict()
        # Back to the normal float export.
        self.assertTrue("weight" in state_dict)
        self.assertTrue("bias" not in state_dict)
        self.assertTrue("quantized_weight" not in state_dict)
        self.assertTrue("_quantized_bias" not in state_dict)
        self.assertTrue("bias_scale" not in state_dict)
    def test_import_from_8bit_without_bias(self):
        """Loading an 8bit checkpoint into a fresh bias-free layer must
        reproduce the exporter's inference outputs exactly."""
        exporter = QuantizedLinear(10, 5, bias=False, mode="dynamic")
        exporter.eval()
        exporter.mode_8bit = True
        state_dict = exporter.state_dict()
        exporter.mode_8bit = False
        importer = QuantizedLinear(10, 5, bias=False, mode="dynamic")
        # Sanity: freshly-initialized weights differ before loading.
        self.assertTrue((exporter.weight != importer.weight).any())
        importer.eval()
        importer.load_state_dict(state_dict, strict=False)
        x = torch.randn(3, 10)
        self.assertTrue((exporter(x) == importer(x)).all())
    def test_import_from_8bit_with_bias(self):
        """Round-trip an 8bit export through load_state_dict for a layer with
        bias, in both dynamic and EMA modes, and check identical outputs."""
        # QuantizationMode dynamic
        exporter = QuantizedLinear(10, 5, mode="dynamic")
        exporter.eval()
        exporter.mode_8bit = True
        state_dict = exporter.state_dict()
        exporter.mode_8bit = False
        importer = QuantizedLinear(10, 5, mode="dynamic")
        self.assertTrue((exporter.weight != importer.weight).any())
        self.assertTrue((exporter.bias != importer.bias).any())
        importer.eval()
        importer.load_state_dict(state_dict, strict=False)
        x = torch.randn(3, 10)
        self.assertTrue((exporter(x) == importer(x)).all())
        # QuantizationMode ema
        exporter = QuantizedLinear(10, 5, requantize_output=False, mode="ema")
        # One training pass so the EMA input threshold becomes non-trivial.
        x = torch.randn(3, 10)
        exporter(x)
        self.assertTrue(exporter.input_thresh != 0.0)
        exporter.eval()
        exporter.mode_8bit = True
        state_dict = exporter.state_dict()
        exporter.mode_8bit = False
        importer = QuantizedLinear(10, 5, requantize_output=False, mode="ema")
        self.assertTrue((exporter.weight != importer.weight).any())
        self.assertTrue((exporter.bias != importer.bias).any())
        importer.eval()
        importer.load_state_dict(state_dict, strict=False)
        self.assertTrue((exporter(x) == importer(x)).all())
    def test_train_block_when_loading_quantized_model(self):
        """A layer restored from an 8bit checkpoint has lost its float weights,
        so switching it back to training mode must raise RuntimeError."""
        exporter = QuantizedLinear(10, 5, mode="dynamic")
        exporter.eval()
        exporter.mode_8bit = True
        state_dict = exporter.state_dict()
        importer = QuantizedLinear(10, 5, mode="dynamic")
        importer.eval()
        importer.load_state_dict(state_dict, strict=False)
        with self.assertRaises(RuntimeError):
            importer.train()
    def test_restrict_loading_to_train_model(self):
        """Loading an 8bit checkpoint into a layer still in training mode must
        raise RuntimeError (the importer must call eval() first)."""
        exporter = QuantizedLinear(10, 5, mode="dynamic")
        exporter.eval()
        exporter.mode_8bit = True
        state_dict = exporter.state_dict()
        importer = QuantizedLinear(10, 5, mode="dynamic")
        with self.assertRaises(RuntimeError):
            importer.load_state_dict(state_dict, strict=False)
class QuantizedEmbeddingTest(unittest.TestCase):
    """Tests for QuantizedEmbedding: forward in train/eval, STE backward,
    delayed quantization start, mode='none' passthrough, and 8bit export/import."""
    def test_quantized_embedding_training_forward(self):
        """If the weights are already integers in [-127, 127], quantization is
        lossless and the training forward equals a plain embedding lookup."""
        embedding = QuantizedEmbedding(10, 3, mode="ema")
        with torch.no_grad():
            scale = 127.0 / embedding.weight.abs().max()
            self.assertTrue(
                (
                    embedding.fake_quantized_weight
                    == fake_quantize_np(embedding.weight.detach(), scale, 8)
                ).all()
            )
            # Force integer-valued weights so fake quantization is exact.
            embedding.weight.data = (
                torch.randn_like(embedding.weight).mul(127.0).round().clamp(-127.0, 127.0)
            )
        indices = torch.tensor(np.arange(10))
        ground = F.embedding(indices, embedding.weight)
        quantized = embedding(indices)
        self.assertTrue((ground == quantized).all())
    def test_quantized_embedding_inference_forward(self):
        """Same as the training-forward test, but through the eval() path."""
        embedding = QuantizedEmbedding(10, 3, mode="ema")
        with torch.no_grad():
            scale = 127.0 / embedding.weight.abs().max()
            self.assertTrue(
                (
                    embedding.fake_quantized_weight
                    == fake_quantize_np(embedding.weight.detach(), scale, 8)
                ).all()
            )
            embedding.weight.data = (
                torch.randn_like(embedding.weight).mul(127.0).round().clamp(-127.0, 127.0)
            )
        indices = torch.tensor(np.arange(10))
        embedding.eval()
        ground = F.embedding(indices, embedding.weight)
        quantized = embedding(indices)
        self.assertTrue((ground == quantized).all())
    def test_quantized_embedding_backward(self):
        """STE backward: the embedding row for the looked-up index receives the
        downstream linear layer's weight as its gradient."""
        embedding = QuantizedEmbedding(10, 3, mode="ema")
        linear = nn.Linear(3, 1)
        indices = torch.tensor([2])
        h = embedding(indices)
        y = linear(h)
        y.backward()
        grad = torch.zeros_like(embedding.weight)
        grad[indices.item(), :] = linear.weight.t().squeeze()
        self.assertTrue((embedding.weight.grad == grad).all())
        self.assertTrue((linear.weight.grad == h).all())
    def test_delay_quantization_start(self):
        """start_step=1: first lookup is unquantized (matches nn.Embedding),
        second lookup is quantized and differs."""
        qembedding = QuantizedEmbedding(10, 3, mode="ema", start_step=1)
        embedding = nn.Embedding(10, 3)
        embedding.weight.data = qembedding.weight
        indices = torch.tensor(np.arange(10))
        self.assertTrue((embedding(indices) == qembedding(indices)).all())
        self.assertTrue((embedding(indices) != qembedding(indices)).any())
    def test_quantization_turned_off(self):
        """mode='none' must behave like nn.Embedding on every call."""
        qembedding = QuantizedEmbedding(10, 3, mode="none")
        embedding = nn.Embedding(10, 3)
        embedding.weight.data = qembedding.weight
        indices = torch.tensor(np.arange(10))
        self.assertTrue((embedding(indices) == qembedding(indices)).all())
        self.assertTrue((embedding(indices) == qembedding(indices)).all())
    def test_export_to_8bit(self):
        """mode_8bit swaps the float 'weight' for an int8 'quantized_weight'
        in state_dict(), and toggling it back restores the float export."""
        qembed = QuantizedEmbedding(10, 5, mode="EMA")
        qembed.eval()
        state_dict = qembed.state_dict()
        self.assertTrue("quantized_weight" not in state_dict)
        self.assertTrue("weight" in state_dict)
        qembed.mode_8bit = True
        state_dict = qembed.state_dict()
        self.assertTrue("quantized_weight" in state_dict)
        self.assertTrue(state_dict["quantized_weight"].dtype == torch.int8)
        self.assertTrue("weight" not in state_dict)
        qembed.mode_8bit = False
        state_dict = qembed.state_dict()
        self.assertTrue("quantized_weight" not in state_dict)
        self.assertTrue("weight" in state_dict)
    def test_load_from_8bit(self):
        """Loading an 8bit embedding checkpoint must reproduce the exporter's
        lookups exactly."""
        exporter = QuantizedEmbedding(10, 5, mode="EMA")
        exporter.eval()
        exporter.mode_8bit = True
        state_dict = exporter.state_dict()
        exporter.mode_8bit = False
        importer = QuantizedEmbedding(10, 5, mode="EMA")
        self.assertTrue((exporter.weight != importer.weight).any())
        importer.eval()
        importer.load_state_dict(state_dict, strict=False)
        indices = torch.tensor(np.arange(10))
        self.assertTrue((exporter(indices) == importer(indices)).all())
| 18,267 | 41.385151 | 100 | py |
nlp-architect | nlp-architect-master/tests/test_data_utils.py | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import math
import os
import torch
from nlp_architect.data.utils import split_column_dataset
from tests.utils import count_examples
from nlp_architect.nn.torch.data.dataset import CombinedTensorDataset
from torch.utils.data import TensorDataset
def test_concat_dataset():
    """CombinedTensorDataset should concatenate a labeled and an unlabeled
    TensorDataset, zero-filling the labels of the unlabeled part."""
    token_ids = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.long)
    label_ids = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.long)
    labeled_dataset = TensorDataset(token_ids, label_ids)
    unlabeled_dataset = TensorDataset(token_ids)
    concat_dataset = CombinedTensorDataset([labeled_dataset, unlabeled_dataset])
    expected_tokens = torch.tensor(
        [[1, 2, 3], [4, 5, 6], [7, 8, 9], [1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.long
    )
    # Labels for the unlabeled half are expected to be all zeros.
    expected_labels = torch.tensor(
        [[1, 2, 3], [4, 5, 6], [7, 8, 9], [0, 0, 0], [0, 0, 0], [0, 0, 0]], dtype=torch.long
    )
    assert torch.equal(concat_dataset.tensors[0], expected_tokens)
    assert torch.equal(concat_dataset.tensors[1], expected_labels)
def test_split_dataset():
    """split_column_dataset should split the fixture train file into labeled /
    unlabeled files with the requested (ceil-rounded) example counts.

    NOTE(review): the whole body is guarded by os.path.exists(data_dir), so the
    test silently passes when the fixture directory is missing.
    """
    current_dir = os.path.dirname(os.path.realpath(__file__))
    data_dir = os.path.join(current_dir, "fixtures/data/distillation")
    num_of_examples = count_examples(data_dir + os.sep + "train.txt")
    labeled_precentage = 0.4
    unlabeled_precentage = 0.5
    if os.path.exists(data_dir):
        labeled_file = "labeled.txt"
        unlabeled_file = "unlabeled.txt"
        split_column_dataset(
            dataset=os.path.join(data_dir, "train.txt"),
            first_count=math.ceil(num_of_examples * labeled_precentage),
            second_count=math.ceil(num_of_examples * unlabeled_precentage),
            out_folder=data_dir,
            first_filename=labeled_file,
            second_filename=unlabeled_file,
        )
        # Each output file must contain exactly the requested number of examples.
        check_labeled_count = count_examples(data_dir + os.sep + labeled_file)
        assert check_labeled_count == math.ceil(num_of_examples * labeled_precentage)
        check_unlabeled_count = count_examples(data_dir + os.sep + unlabeled_file)
        assert check_unlabeled_count == math.ceil(num_of_examples * unlabeled_precentage)
        # Clean up so reruns start from the same fixture state.
        os.remove(data_dir + os.sep + "labeled.txt")
        os.remove(data_dir + os.sep + "unlabeled.txt")
| 2,991 | 45.030769 | 92 | py |
nlp-architect | nlp-architect-master/tests/test_ner_taggers.py | import argparse
import os
import tempfile
import shutil
import torch
from nlp_architect.procedures import TrainTagger
from nlp_architect.nn.torch.modules.embedders import IDCNN, CNNLSTM
# Shared fixtures/configuration for the tagger smoke tests below.
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
DATA_DIR = os.path.join(CURRENT_DIR, "fixtures/conll_sample")
OUTPUT_DIR = tempfile.mkdtemp()  # scratch dir, removed at the end of the sanity test
PARSER = argparse.ArgumentParser()
TRAIN_PROCEDURE = TrainTagger()
TRAIN_PROCEDURE.add_arguments(PARSER)
# NOTE(review): None is passed verbatim into PARSER.parse_args() below;
# presumably TrainTagger tolerates a missing embedding file — verify.
EMBEDDINGS_PATH = None
BATCH_SIZE = 128
LEARNING_RATE = 0.0008
EPOCHS = 1  # single epoch: these are smoke tests, not accuracy tests
TRAIN_FILENAME = "data.txt"
def test_taggers():
    """Shape check: IDCNN and CNNLSTM embedders must both map a padded
    (1, 8)-token batch to logits of shape (1, 8, label_vocab_size + 1)."""
    words = torch.tensor([[1, 2, 3, 4, 5, 0, 0, 0]], dtype=torch.long)  # (1,8)
    word_chars = torch.tensor(
        [[[1, 2, 0], [3, 0, 0], [4, 5, 0], [5, 4, 3], [2, 2, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
        dtype=torch.long,
    )  # (1, 8, 3)
    shapes = torch.tensor([[1, 2, 3, 3, 3, 0, 0, 0]], dtype=torch.long)  # (1,8)
    mask = torch.tensor([[1, 1, 1, 1, 1, 0, 0, 0]], dtype=torch.long)  # (1,8)
    labels = torch.tensor([[1, 1, 3, 4, 1, 0, 0, 0]], dtype=torch.long)  # (1,8)
    word_vocab_size = 5
    label_vocab_size = 4
    inputs = {
        "words": words,
        "word_chars": word_chars,
        "shapes": shapes,
        "mask": mask,
        "labels": labels,
    }
    # +1 everywhere to reserve index 0 for padding.
    idcnn_model = IDCNN(word_vocab_size + 1, label_vocab_size + 1)
    lstm_model = CNNLSTM(word_vocab_size + 1, label_vocab_size + 1)
    expected_output_shape = torch.Size([1, 8, label_vocab_size + 1])
    idcnn_logits = idcnn_model(**inputs)
    assert idcnn_logits.shape == expected_output_shape
    lstm_logits = lstm_model(**inputs)
    assert lstm_logits.shape == expected_output_shape
def test_tagging_procedure_sanity():
    """Smoke-test the TrainTagger procedure end-to-end.

    Runs one training epoch on the tiny CoNLL fixture for every embedder /
    decoder combination — (id-cnn, cnn-lstm) x (softmax, CRF) — in the same
    order as before, and only checks that each run completes without raising.

    The four configurations previously duplicated the same ~20-line argv list;
    it is now built once by a helper.
    """
    def _build_args(model_type, use_crf):
        # Assemble the CLI argument list shared by all four configurations.
        argv = [
            "--data_dir",
            DATA_DIR,
            "--output_dir",
            OUTPUT_DIR,
            "--embedding_file",
            EMBEDDINGS_PATH,
            "-b",
            str(BATCH_SIZE),
            "--lr",
            str(LEARNING_RATE),
            "-e",
            str(EPOCHS),
            "--train_filename",
            TRAIN_FILENAME,
            "--dev_filename",
            TRAIN_FILENAME,
            "--test_filename",
            TRAIN_FILENAME,
            "--model_type",
            model_type,
        ]
        if use_crf:
            argv.append("--use_crf")
        argv.append("--overwrite_output_dir")
        return PARSER.parse_args(argv)

    # Same run order as the original: id-cnn softmax, id-cnn CRF,
    # cnn-lstm softmax, cnn-lstm CRF.
    for model_type in ("id-cnn", "cnn-lstm"):
        for use_crf in (False, True):
            TRAIN_PROCEDURE.run_procedure(_build_args(model_type, use_crf))
    # remove output files
    shutil.rmtree(OUTPUT_DIR)
    assert True
| 4,727 | 26.017143 | 99 | py |
nlp-architect | nlp-architect-master/nlp_architect/nn/torch/quantization.py | # ******************************************************************************
# Copyright 2017-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
# pylint: disable=no-member
"""
Quantization ops
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from enum import Enum, auto
import logging
from abc import ABC, abstractmethod
import torch
from torch import nn
from torch.nn import functional as F
from nlp_architect.common import Config
logger = logging.getLogger(__name__)
def get_dynamic_scale(x, bits, with_grad=False):
    """Calculate a dynamic quantization scale from ``x``.

    The threshold is the largest absolute value in ``x``; the returned scale
    maps that threshold to the maximum symmetric ``bits``-bit integer.
    Gradient tracking for the threshold is disabled unless ``with_grad``.
    """
    with torch.set_grad_enabled(with_grad):
        peak = x.abs().max()
    # scale = (2**(bits-1) - 1) / max|x|
    return (2 ** (bits - 1) - 1) / peak
def get_scale(bits, threshold):
    """Return the linear quantization scale mapping ``threshold`` to the
    maximum symmetric ``bits``-bit value."""
    max_value = (1 << (bits - 1)) - 1
    return max_value / threshold
def calc_max_quant_value(bits):
    """Return the largest symmetric ``bits``-bit quantized value (2**(bits-1) - 1)."""
    return (1 << (bits - 1)) - 1
def quantize(input, scale, bits):
    """Linearly quantize ``input``: scale, round to nearest, then clamp to the
    symmetric ``bits``-bit range [-(2**(bits-1)-1), 2**(bits-1)-1]."""
    bound = 2 ** (bits - 1) - 1
    scaled = torch.round(input * scale)
    return torch.clamp(scaled, -bound, bound)
def dequantize(input, scale):
    """Invert linear quantization by dividing the scale back out."""
    return input / scale
# TODO(ofir) future work, implement a layer that uses this function that gives a more comfortable
class FakeLinearQuantizationWithSTE(torch.autograd.Function):
    """Simulates error caused by quantization. Uses Straight-Through Estimator for Back prop"""
    @staticmethod
    def forward(ctx, input, scale, bits=8):
        """fake quantize input according to scale and number of bits, dequantize
        quantize(input))"""
        # Round-trip through the integer grid: the result is float but carries
        # the rounding/clamping error of real quantization.
        return dequantize(quantize(input, scale, bits), scale)
    @staticmethod
    def backward(ctx, grad_output):
        """Calculate estimated gradients for fake quantization using
        Straight-Through Estimator (STE) according to:
        https://openreview.net/pdf?id=B1ae1lZRb"""
        # STE: pass the gradient through unchanged; scale and bits get None.
        return grad_output, None, None
class QuantizationMode(Enum):
    # NONE: no quantization; DYNAMIC: activation scales computed per batch;
    # EMA: scales derived from an exponential moving average of thresholds.
    NONE = auto()
    DYNAMIC = auto()
    EMA = auto()
# Shorthand for the fake-quantization autograd op used throughout this module.
_fake_quantize = FakeLinearQuantizationWithSTE.apply
class QuantizedLayer(ABC):
    """Quantized Layer interface.

    Mixin adding quantization-aware training to an nn.Module subclass (used
    below with nn.Linear and nn.Embedding). Subclasses implement the two
    abstract forward variants; this class handles mode/step bookkeeping,
    train/eval transitions and 8bit state-dict export/import.
    """
    # attribute names copied from a QuantizationConfig by from_config()
    CONFIG_ATTRIBUTES = ["weight_bits", "start_step", "mode"]
    # attributes displayed by extra_repr()
    REPR_ATTRIBUTES = ["mode", "weight_bits"]
    def __init__(self, *args, weight_bits=8, start_step=0, mode="none", **kwargs):
        if weight_bits < 2:
            raise ValueError(f"weight_bits={weight_bits} must be higher than 1 ")
        super().__init__(*args, **kwargs)
        self.weight_bits = weight_bits
        self.mode = QuantizationMode[mode.upper()]
        self.start_step = start_step
        # forward-pass counter; quantization starts once it reaches start_step
        self.register_buffer("_step", torch.zeros(1))
        # buffers for inference
        self.register_buffer("quantized_weight", None)
        self.register_buffer("_weight_scale", None)
        # handle import and export in 8bit
        self.mode_8bit = False
        self._imported_from_quantized = False
        # register saving hook
        self._register_state_dict_hook(self._state_dict_hook)
    def forward(self, input):
        # NONE mode behaves exactly like the underlying unquantized layer.
        if self.mode == QuantizationMode.NONE:
            return super().forward(input)
        if self.training:
            # quantization kicks in only after start_step training passes
            if self._step >= self.start_step:
                out = self.training_quantized_forward(input)
            else:
                out = super().forward(input)
            self._step += 1
        else:
            out = self.inference_quantized_forward(input)
        return out
    @abstractmethod
    def training_quantized_forward(self, input):
        """Implement forward method to be used while training"""
    @abstractmethod
    def inference_quantized_forward(self, input):
        """Implement forward method to be used while evaluating"""
    @classmethod
    def from_config(cls, *args, config=None, **kwargs):
        """Initialize quantized layer from config"""
        return cls(*args, **kwargs, **{k: getattr(config, k) for k in cls.CONFIG_ATTRIBUTES})
    @property
    def fake_quantized_weight(self):
        # weight after simulated quantization; STE keeps it differentiable
        return _fake_quantize(self.weight, self.weight_scale, self.weight_bits)
    @property
    def weight_scale(self):
        # dynamic scale while training; frozen buffer after _eval()
        return (
            get_dynamic_scale(self.weight, self.weight_bits)
            if self.training
            else self._weight_scale
        )
    def train(self, mode=True):
        """handle transition between quantized model and simulated quantization"""
        if self.training != mode:
            if mode:
                # a model restored from an 8bit checkpoint has no float weights
                if self._imported_from_quantized:
                    raise RuntimeError(
                        "Model imported from quantized checkpoint cannot be moved to \
                        training mode"
                    )
                self._train()
            else:
                self._eval()
        super().train(mode)
    def _train(self):
        """function to be called by self.train(mode=True) which modifies modules attributes\
             according to the model"""
    def _eval(self):
        """function to be called by self.train(mode=False), or eval() which modifies modules\
             attributes according to the model"""
        # freeze the weight scale and materialize the integer weight for inference
        self._weight_scale = self.weight_scale
        self.quantized_weight = quantize(self.weight, self.weight_scale, self.weight_bits)
    def _load_from_state_dict(
        self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
    ):
        """check if model is loaded from quantized checkpoint or regular checkpoint"""
        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
        )
        if state_dict.get(prefix + "quantized_weight", None) is not None:
            if self.training:
                raise RuntimeError(
                    "Can't load quantized model in training mode, first change model's \
                        to evaluation and then load the saved model"
                )
            self._imported_from_quantized = True
    @staticmethod
    def _state_dict_hook(module, state_dict, prefix, local_metadata):
        """hook to be registered to module when exporting the model to 8bit, can be overrided\
        to customize to layer behaviour"""
        if module.mode_8bit and module.mode != QuantizationMode.NONE:
            # 8bit export: drop float weight and step counter, narrow to int8
            state_dict.pop(prefix + "weight", None)
            state_dict.pop(prefix + "_step", None)
            state_dict[prefix + "quantized_weight"] = state_dict[prefix + "quantized_weight"].char()
        else:
            # normal export: drop the inference-only quantized buffers
            state_dict.pop(prefix + "quantized_weight", None)
            state_dict.pop(prefix + "_weight_scale", None)
    def extra_repr(self):
        s = ""
        for entry in self.REPR_ATTRIBUTES:
            s += f", {entry}={getattr(self, entry)}"
        return super().extra_repr() + s
class QuantizedLinear(QuantizedLayer, nn.Linear):
    """Linear layer with quantization aware training capability.

    Fake-quantizes activations (and optionally the output) during training,
    and runs an integer matmul with a dequantize step during inference.
    """
    CONFIG_ATTRIBUTES = QuantizedLayer.CONFIG_ATTRIBUTES + [
        "activation_bits",
        "requantize_output",
        "ema_decay",
    ]
    REPR_ATTRIBUTES = QuantizedLayer.REPR_ATTRIBUTES + [
        "activation_bits",
        "accumulation_bits",
        "ema_decay",
        "requantize_output",
    ]
    def __init__(
        self, *args, activation_bits=8, requantize_output=True, ema_decay=0.9999, **kwargs
    ):
        super().__init__(*args, **kwargs)
        if activation_bits < 2:
            raise ValueError(f"activation_bits={activation_bits} must be higher than 1 ")
        self.activation_bits = activation_bits
        # bias and matmul accumulator are kept in 32 bits
        self.accumulation_bits = 32
        self.ema_decay = ema_decay
        self.requantize_output = requantize_output
        self.register_buffer("input_thresh", torch.zeros(1))
        if self.requantize_output:
            self.register_buffer("output_thresh", torch.zeros(1))
        # real quantization
        if kwargs.get("bias", True):
            self.register_buffer("_quantized_bias", None)
            self.register_buffer("bias_scale", None)
    def training_quantized_forward(self, input):
        """fake quantized forward, fake quantizes weights and activations,
        learn quantization ranges if quantization mode is EMA.
        This function should only be used while training"""
        assert self.training, "should only be called when training"
        if self.mode == QuantizationMode.EMA:
            self._update_ema(self.input_thresh, input.detach())
        input_scale = self._get_input_scale(input)
        out = F.linear(
            _fake_quantize(input, input_scale, self.activation_bits),
            self.fake_quantized_weight,
            self.bias,
        )
        if self.requantize_output:
            if self.mode == QuantizationMode.EMA:
                self._update_ema(self.output_thresh, out.detach())
            out = _fake_quantize(out, self._get_output_scale(out), self.activation_bits)
        return out
    def inference_quantized_forward(self, input):
        """Simulate quantized inference. quantize input and perform calculation with only integer numbers.
        This function should only be used while doing inference"""
        assert not self.training, "should only be called when not training"
        input_scale = self._get_input_scale(input)
        # the bias lives at the accumulator's scale: weight_scale * input_scale
        self.bias_scale = self.weight_scale * input_scale
        quantized_input = quantize(input, input_scale, self.activation_bits)
        out = F.linear(quantized_input, self.quantized_weight, self.quantized_bias)
        # TODO(ofir) fuse the operation of requantization with dequantiz
        out = dequantize(out, self.bias_scale)
        if self.requantize_output:
            output_scale = self._get_output_scale(out)
            out = dequantize(quantize(out, output_scale, self.activation_bits), output_scale)
        return out
    def _eval(self):
        super()._eval()
        # In EMA mode the input scale is known ahead of time, so the integer
        # bias can be precomputed once at the train->eval transition.
        if self.mode == QuantizationMode.EMA and self.bias is not None:
            self.bias_scale = self._get_input_scale() * self.weight_scale
            self.quantized_bias = quantize(self.bias, self.bias_scale, self.accumulation_bits)
    @staticmethod
    def _state_dict_hook(module, state_dict, prefix, local_metadata):
        """hook to be registered to module when exporting the model to 8bit,\
             can be overrided to customize to layer behaviour"""
        # NOTE(review): zero-arg super() inside a staticmethod binds via the
        # enclosing __class__ cell and the first local (module) — confirm this
        # is intentional rather than calling QuantizedLayer._state_dict_hook.
        super()._state_dict_hook(module, state_dict, prefix, local_metadata)
        if module.mode_8bit:
            if module.mode == QuantizationMode.EMA:
                state_dict.pop(prefix + "bias", None)
                try:
                    state_dict[prefix + "_quantized_bias"] = state_dict[
                        prefix + "_quantized_bias"
                    ].int()
                except KeyError:
                    # in case there is no bias dont do anything
                    pass
        else:
            state_dict.pop(prefix + "_quantized_bias", None)
            state_dict.pop(prefix + "bias_scale", None)
    @property
    def quantized_bias(self):
        try:
            if self.mode == QuantizationMode.EMA:
                # precomputed by _eval()
                bias = self._quantized_bias
            elif self.mode == QuantizationMode.DYNAMIC:
                # dynamic mode: bias_scale depends on the current input scale
                bias = quantize(self.bias, self.bias_scale, self.accumulation_bits)
            else:
                raise RuntimeError(f"Unknown quantization mode: {self.mode}")
        except AttributeError:
            # bias=False layers never registered the buffers
            bias = None
        return bias
    @quantized_bias.setter
    def quantized_bias(self, value):
        self._quantized_bias = value
    def _get_input_scale(self, input=None):
        return self._get_activation_scale(input, self.input_thresh)
    def _get_output_scale(self, output=None):
        return self._get_activation_scale(output, self.output_thresh)
    def _get_activation_scale(self, activation, threshold):
        # NOTE(review): `scale` is unbound when mode is NONE, but forward()
        # short-circuits NONE mode before reaching here.
        if self.mode == QuantizationMode.DYNAMIC:
            scale = get_dynamic_scale(activation, self.activation_bits)
        elif self.mode == QuantizationMode.EMA:
            scale = get_scale(self.activation_bits, threshold)
        return scale
    def _update_ema(self, ema, input, reduce_fn=lambda x: x.abs().max()):
        """Update exponential moving average (EMA) of activations thresholds.
        the reduce_fn calculates the current threshold from the input tensor"""
        assert self._step >= self.start_step
        if self._step == self.start_step:
            # first quantized step seeds the EMA with the raw threshold
            ema.fill_(reduce_fn(input))
        else:
            ema.sub_((1 - self.ema_decay) * (ema - reduce_fn(input)))
class QuantizedEmbedding(QuantizedLayer, nn.Embedding):
    """Embedding layer with quantization aware training capability"""
    def training_quantized_forward(self, input):
        """Return quantized embeddings"""
        assert self.training, "should only be called when training"
        # Look up rows of the fake-quantized weight (STE keeps it trainable).
        return F.embedding(
            input,
            self.fake_quantized_weight,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )
    def inference_quantized_forward(self, input):
        """forward to be used during inference"""
        assert not self.training, "should only be called when not training"
        # Look up integer rows, then dequantize only the gathered rows.
        q_embeddings = F.embedding(
            input,
            self.quantized_weight,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )
        return dequantize(q_embeddings, self.weight_scale)
class QuantizationConfig(Config):
    """Quantization Configuration Object.

    Defaults consumed by QuantizedLayer.from_config() and its subclasses.
    """
    ATTRIBUTES = {
        "activation_bits": 8,
        "weight_bits": 8,
        "mode": "none",
        "start_step": 0,
        "ema_decay": 0.9999,
        "requantize_output": True,
    }
| 14,729 | 37.25974 | 106 | py |
nlp-architect | nlp-architect-master/nlp_architect/nn/torch/__init__.py | # ******************************************************************************
# Copyright 2017-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import random
import time
import numpy as np
import torch
def setup_backend(no_cuda):
    """Select the torch device and count the usable GPUs.

    Args:
        no_cuda (bool): if True, force CPU even when CUDA is available.

    Returns:
        tuple(torch.device, int): the selected device and the number of
        visible GPUs (0 when running on CPU).
    """
    # The original computed `device` twice; the first assignment was dead
    # code immediately overwritten by the if/else below.
    if torch.cuda.is_available() and not no_cuda:
        device = torch.device("cuda")
        n_gpu = torch.cuda.device_count()
    else:
        device = torch.device("cpu")
        n_gpu = 0
    return device, n_gpu
def set_seed(seed, n_gpus=None):
    """Seed the python, numpy and torch RNGs and return the seed used.

    A seed of -1 is replaced by the current unix time, so callers can log
    the returned value to reproduce a run. CUDA RNGs are seeded only when
    ``n_gpus`` is a positive count.
    """
    chosen = int(time.time()) if seed == -1 else seed
    random.seed(chosen)
    np.random.seed(chosen)
    torch.manual_seed(chosen)
    if (n_gpus or 0) > 0:  # treat None the same as zero GPUs
        torch.cuda.manual_seed_all(chosen)
    return chosen
| 1,518 | 32.755556 | 89 | py |
nlp-architect | nlp-architect-master/nlp_architect/nn/torch/distillation.py | # ******************************************************************************
# Copyright 2017-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import argparse
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from nlp_architect.models import TrainableModel
logger = logging.getLogger(__name__)
# Distillation loss candidates, selected by name via the `losses` map.
MSE_loss = nn.MSELoss(reduction="mean")
KL_loss = nn.KLDivLoss(reduction="batchmean")
losses = {
    "kl": KL_loss,
    "mse": MSE_loss,
}
# Teacher model classes accepted by the --teacher_model_type CLI argument.
TEACHER_TYPES = ["bert"]
class TeacherStudentDistill:
    """
    Teacher-Student knowledge distillation helper.
    Use this object when training a model with KD and a teacher model.
    Args:
        teacher_model (TrainableModel): teacher model
        temperature (float, optional): KD temperature. Defaults to 1.0.
        dist_w (float, optional): distillation loss weight. Defaults to 0.1.
        loss_w (float, optional): student loss weight. Defaults to 1.0.
        loss_function (str, optional): loss function to use (kl for KLDivLoss,
            mse for MSELoss)
    """
    def __init__(
        self,
        teacher_model: TrainableModel,
        temperature: float = 1.0,
        dist_w: float = 0.1,
        loss_w: float = 1.0,
        loss_function="kl",
    ):
        self.teacher = teacher_model
        self.t = temperature
        self.dist_w = dist_w
        self.loss_w = loss_w
        # unknown loss names silently fall back to KL divergence
        self.loss_fn = losses.get(loss_function, KL_loss)
    def get_teacher_logits(self, inputs):
        """
        Get teacher logits
        Args:
            inputs: input
        Returns:
            teacher logits
        """
        return self.teacher.get_logits(inputs)
    @staticmethod
    def add_args(parser: argparse.ArgumentParser):
        """
        Add KD arguments to parser
        Args:
            parser (argparse.ArgumentParser): parser
        """
        parser.add_argument(
            "--teacher_model_path", type=str, required=True, help="Path to teacher model"
        )
        parser.add_argument(
            "--teacher_model_type",
            type=str,
            required=True,
            choices=TEACHER_TYPES,
            help="Teacher model class type",
        )
        parser.add_argument("--kd_temp", type=float, default=1.0, help="KD temperature value")
        parser.add_argument(
            "--kd_loss_fn", type=str, choices=["kl", "mse"], default="mse", help="KD loss function"
        )
        parser.add_argument("--kd_dist_w", type=float, default=0.1, help="KD weight on loss")
        parser.add_argument(
            "--kd_student_w", type=float, default=1.0, help="KD student weight on loss"
        )
    def distill_loss(self, loss, student_logits, teacher_logits):
        """
        Add KD loss
        Args:
            loss: student loss
            student_logits: student model logits
            teacher_logits: teacher model logits
        Returns:
            KD loss
        """
        student_log_sm = F.log_softmax(student_logits / self.t, dim=-1)
        # NOTE(review): despite the "log_sm" name this holds plain softmax
        # probabilities — which is what KLDivLoss expects as target.
        teacher_log_sm = F.softmax(teacher_logits / self.t, dim=-1)
        distill_loss = self.loss_fn(input=student_log_sm, target=teacher_log_sm)
        # temperature**2 rescaling follows Hinton et al. soft-target gradients
        return (self.loss_w * loss) + (distill_loss * self.dist_w * (self.t ** 2))
    def distill_loss_dict(self, loss, student_logits_dict, teacher_logits_dict):
        """
        Add KD loss averaged over a dict of per-head logits.
        Args:
            loss: student loss
            student_logits_dict: student model logits keyed 0..n-1
            teacher_logits_dict: teacher model logits keyed 0..n-1
        Returns:
            KD loss
        """
        # NOTE(review): iterating range(len(d.keys())) assumes the dicts are
        # keyed by consecutive integers starting at 0 — verify callers.
        student_sm_dict = {}
        for i in range(len(student_logits_dict.keys())):
            student_sm_dict[i] = F.log_softmax(student_logits_dict[i] / self.t, dim=-1)
        teacher_sm_dict = {}
        for i in range(len(teacher_logits_dict.keys())):
            teacher_sm_dict[i] = F.softmax(teacher_logits_dict[i] / self.t, dim=-1)
        distill_losses = [
            self.loss_fn(input=student_sm_dict[i], target=teacher_sm_dict[i])
            for i in range(len(student_sm_dict.keys()))
        ]
        distill_loss = torch.mean(torch.stack(distill_losses))
        return (self.loss_w * loss) + (distill_loss * self.dist_w * (self.t ** 2))
| 4,826 | 31.18 | 99 | py |
nlp-architect | nlp-architect-master/nlp_architect/nn/torch/modules/embedders.py | # ******************************************************************************
# Copyright 2017-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import os
from typing import List
import torch
from torch import nn as nn
import torch.nn.functional as F
from nlp_architect.utils.io import load_json_file
from nlp_architect.utils.text import n_letters
class CNNLSTM(nn.Module):
    """CNN-LSTM embedder (based on Ma and Hovy. 2016)
    Each token is represented by a word embedding concatenated with a
    character-level CNN feature vector; the sequence is encoded with a
    (bi)LSTM and projected to per-token label logits.
    Args:
        word_vocab_size (int): word vocabulary size
        num_labels (int): number of labels (classifier)
        word_embedding_dims (int, optional): word embedding dims
        char_embedding_dims (int, optional): character embedding dims
        cnn_kernel_size (int, optional): character CNN kernel size
        cnn_num_filters (int, optional): character CNN number of filters
        lstm_hidden_size (int, optional): LSTM embedder hidden size
        lstm_layers (int, optional): num of LSTM layers
        bidir (bool, optional): apply bi-directional LSTM
        dropout (float, optional): dropout rate
        padding_idx (int, optional): padding number for embedding layers
    """
    def __init__(
        self,
        word_vocab_size: int,
        num_labels: int,
        word_embedding_dims: int = 100,
        char_embedding_dims: int = 16,
        cnn_kernel_size: int = 3,
        cnn_num_filters: int = 128,
        lstm_hidden_size: int = 100,
        lstm_layers: int = 2,
        bidir: bool = True,
        dropout: float = 0.5,
        padding_idx: int = 0,
    ):
        super(CNNLSTM, self).__init__()
        self.word_embedding_dim = word_embedding_dims
        self.word_embeddings = nn.Embedding(
            word_vocab_size, word_embedding_dims, padding_idx=padding_idx
        )
        # character table covers the fixed character set (n_letters) plus padding
        self.char_embeddings = nn.Embedding(
            n_letters + 1, char_embedding_dims, padding_idx=padding_idx
        )
        # "same"-style padding so the character CNN preserves the word length
        self.conv1 = nn.Conv1d(
            in_channels=char_embedding_dims,
            out_channels=cnn_num_filters,
            kernel_size=cnn_kernel_size,
            padding=int(cnn_kernel_size / 2),
        )
        self.relu = nn.ReLU()
        # LSTM consumes word embedding + pooled char-CNN features per token
        self.lstm = nn.LSTM(
            input_size=word_embedding_dims + cnn_num_filters,
            hidden_size=lstm_hidden_size,
            bidirectional=bidir,
            batch_first=True,
            num_layers=lstm_layers,
        )
        self.dropout = nn.Dropout(dropout)
        self.dense = nn.Linear(
            in_features=lstm_hidden_size * 2 if bidir else lstm_hidden_size, out_features=num_labels
        )
        self.num_labels = num_labels
        self.padding_idx = padding_idx
    def load_embeddings(self, embeddings):
        """
        Load pre-defined word embeddings
        Args:
            embeddings (torch.tensor): word embedding tensor
        """
        # freeze=False keeps the pre-trained embeddings trainable
        self.word_embeddings = nn.Embedding.from_pretrained(
            embeddings, freeze=False, padding_idx=self.padding_idx
        )
    def forward(self, words, word_chars, **kwargs):
        """
        CNN-LSTM forward step
        Args:
            words (torch.tensor): words
            word_chars (torch.tensor): word character tensors
        Returns:
            torch.tensor: logits of model
        """
        # assumes words is (batch, seq) and word_chars is (batch, seq, word_len)
        # of int ids -- TODO confirm against the data loader
        word_embeds = self.word_embeddings(words)
        char_embeds = self.char_embeddings(word_chars)
        # remember (batch, seq) so char features can be un-flattened below
        saved_char_size = char_embeds.size()[:2]
        # channels-first layout for Conv1d: embedding dim before char positions
        char_embeds = char_embeds.permute(0, 1, 3, 2)
        input_size = char_embeds.size()
        squashed_shape = [-1] + list(input_size[2:])
        char_embeds_reshape = char_embeds.contiguous().view(
            *squashed_shape
        )  # (samples * timesteps, input_size)
        char_embeds = self.conv1(char_embeds_reshape)
        char_embeds = char_embeds.permute(0, 2, 1)
        char_embeds = self.relu(char_embeds)
        char_embeds, _ = torch.max(char_embeds, 1)  # global max pooling
        # restore the (batch, seq, filters) layout
        new_size = saved_char_size + char_embeds.size()[1:]
        char_features = char_embeds.contiguous().view(new_size)
        features = torch.cat((word_embeds, char_features), -1)
        features = self.dropout(features)
        # compact LSTM weights into contiguous memory (no-op on CPU)
        self.lstm.flatten_parameters()
        lstm_out, _ = self.lstm(features)
        lstm_out = self.dropout(lstm_out)
        logits = self.dense(lstm_out)
        return logits
    @classmethod
    def from_config(cls, word_vocab_size: int, num_labels: int, config: str):
        """
        Load a model from a configuration file
        A valid configuration file is a JSON file with fields as in class `__init__`
        Args:
            word_vocab_size (int): word vocabulary size
            num_labels (int): number of labels (classifier)
            config (str): path to configuration file
        Returns:
            CNNLSTM: CNNLSTM module pre-configured
        """
        if not os.path.exists(config):
            raise FileNotFoundError
        cfg = load_json_file(config)
        return cls(word_vocab_size=word_vocab_size, num_labels=num_labels, **cfg)
class IDCNN(nn.Module):
    """
    ID-CNN (iterated dilated) tagging model (based on Strubell et al 2017) with word character
    embedding (using CNN feature extractors)
    Args:
        word_vocab_size (int): word vocabulary size
        num_labels (int): number of labels (classifier)
        word_embedding_dims (int, optional): word embedding dims
        shape_vocab_size (int, optional): shape vocabulary size
        shape_embedding_dims (int, optional): shape embedding dims
        char_embedding_dims (int, optional): character embedding dims
        char_cnn_filters (int, optional): character CNN kernel size
        char_cnn_kernel_size (int, optional): character CNN number of filters
        cnn_kernel_size (int, optional): CNN embedder kernel size
        cnn_num_filters (int, optional): CNN embedder number of filters
        input_dropout (float, optional): input layer (embedding) dropout rate
        middle_dropout (float, optional): middle layer dropout rate
        hidden_dropout (float, optional): hidden layer dropout rate
        blocks (int, optional): number of blocks
        dilations (List, optional): List of dilations per CNN layer
        embedding_pad_idx (int, optional): padding number for embedding layers
        use_chars (bool, optional): whether to use char embedding, defaults to False
        drop_penalty (float, optional): penalty for dropout regularization
    """
    def __init__(
        self,
        word_vocab_size: int,
        num_labels: int,
        word_embedding_dims: int = 100,
        shape_vocab_size: int = 4,
        shape_embedding_dims: int = 5,
        char_embedding_dims: int = 16,
        char_cnn_filters: int = 128,
        char_cnn_kernel_size: int = 3,
        cnn_kernel_size: int = 3,
        cnn_num_filters: int = 128,
        input_dropout: float = 0.35,
        middle_dropout: float = 0,
        hidden_dropout: float = 0.15,
        blocks: int = 1,
        dilations: List = None,
        embedding_pad_idx: int = 0,
        use_chars: bool = False,
        drop_penalty: float = 1e-4,
    ):
        super(IDCNN, self).__init__()
        # default dilation schedule from the ID-CNN paper: 1, 2, 1
        if dilations is None:
            dilations = [1, 2, 1]
        self.num_blocks = blocks
        self.dilation = dilations
        self.use_chars = use_chars
        self.drop_penalty = drop_penalty
        self.num_labels = num_labels
        self.padding_idx = embedding_pad_idx
        self.word_embedding_dim = word_embedding_dims
        self.word_embeddings = nn.Embedding(
            word_vocab_size, self.word_embedding_dim, padding_idx=self.padding_idx
        )
        self.shape_embeddings = nn.Embedding(
            shape_vocab_size + 1, shape_embedding_dims, padding_idx=self.padding_idx
        )
        # "same"-style padding so convolutions preserve the sequence length
        padding_word = int(cnn_kernel_size / 2)
        # char CNN contributes `char_cnn_filters` extra features per token when enabled
        self.char_filters = char_cnn_filters if use_chars else 0
        self.conv0 = nn.Conv1d(
            in_channels=word_embedding_dims + shape_embedding_dims + self.char_filters,
            out_channels=cnn_num_filters,
            kernel_size=cnn_kernel_size,
            padding=padding_word,
        )
        # one dilated conv layer per entry in `dilations`; padding scales with
        # the dilation so the output length stays constant
        self.cnv_layers = []
        for i in range(len(self.dilation)):
            self.cnv_layers.append(
                nn.Conv1d(
                    in_channels=cnn_num_filters,
                    out_channels=cnn_num_filters,
                    kernel_size=cnn_kernel_size,
                    padding=padding_word * self.dilation[i],
                    dilation=self.dilation[i],
                )
            )
        self.cnv_layers = nn.ModuleList(self.cnv_layers)
        self.dense = nn.Linear(
            in_features=(cnn_num_filters * self.num_blocks), out_features=num_labels
        )
        if use_chars:
            padding_char = int(char_cnn_kernel_size / 2)
            self.char_embeddings = nn.Embedding(
                n_letters + 1, char_embedding_dims, padding_idx=self.padding_idx
            )
            self.char_conv = nn.Conv1d(
                in_channels=char_embedding_dims,
                out_channels=self.char_filters,
                kernel_size=char_cnn_kernel_size,
                padding=padding_char,
            )
        self.i_drop = nn.Dropout(input_dropout)
        self.m_drop = nn.Dropout(middle_dropout)
        self.h_drop = nn.Dropout(hidden_dropout)
    def forward(self, words, word_chars, shapes, no_dropout=False, **kwargs):
        """
        IDCNN forward step
        Args:
            words (torch.tensor): words
            word_chars (torch.tensor): word character tensors
            shapes (torch.tensor): words shapes
            no_dropout (bool, optional): skip all dropout layers (e.g. for the
                clean pass of the dropout-regularization penalty)
        Returns:
            torch.tensor: logits of model
        """
        block_scores = []
        input_features = []
        word_embeds = self.word_embeddings(words)
        shape_embeds = self.shape_embeddings(shapes)
        input_features.extend([word_embeds, shape_embeds])
        if self.use_chars:
            # per-word character CNN: flatten (batch, seq) -> samples, convolve,
            # max-pool over character positions, then restore (batch, seq, filters)
            char_embeds = self.char_embeddings(word_chars)
            saved_char_size = char_embeds.size()[:2]
            char_embeds = char_embeds.permute(0, 1, 3, 2)
            input_size = char_embeds.size()
            squashed_shape = [-1] + list(input_size[2:])
            char_embeds_reshape = char_embeds.contiguous().view(*squashed_shape)
            char_embeds = self.char_conv(char_embeds_reshape)
            char_embeds = char_embeds.permute(0, 2, 1)
            char_embeds = F.relu(char_embeds)
            char_embeds, _ = torch.max(char_embeds, 1)  # global max pooling
            new_size = saved_char_size + char_embeds.size()[1:]
            char_features = char_embeds.contiguous().view(new_size)
            input_features.append(char_features)
        features = torch.cat(input_features, 2)
        if not no_dropout:
            features = self.i_drop(features)
        # channels-first for Conv1d: (batch, features, seq)
        features = features.permute(0, 2, 1)
        conv0 = self.conv0(features)
        conv0 = F.relu(conv0)
        conv_layer = conv0
        # iterated dilated blocks: the same stack of dilated convs is applied
        # repeatedly, feeding each block's output into the next
        for _ in range(self.num_blocks):
            conv_outputs = []
            for j in range(len(self.cnv_layers)):
                conv_layer = self.cnv_layers[j](conv_layer)
                conv_layer = F.relu(conv_layer)
                if j == len(self.cnv_layers) - 1:  # currently use only last layer
                    conv_outputs.append(conv_layer)
            layers_concat = torch.cat(conv_outputs, 1)
            if not no_dropout:
                conv_layer = self.m_drop(layers_concat)  # for next block iteration
            else:
                conv_layer = layers_concat
            layers_concat = layers_concat.squeeze(2).permute(0, 2, 1)  # for final block scores
            if not no_dropout:
                block_output = self.h_drop(layers_concat)
            else:
                block_output = layers_concat
            scores = self.dense(block_output)
            block_scores.append(scores)
        logits = block_scores[-1]  # currently use only last block
        return logits
    @classmethod
    def from_config(cls, word_vocab_size: int, num_labels: int, config: str):
        """
        Load a model from a configuration file
        A valid configuration file is a JSON file with fields as in class `__init__`
        Args:
            word_vocab_size (int): word vocabulary size
            num_labels (int): number of labels (classifier)
            config (str): path to configuration file
        Returns:
            IDCNN: IDCNNEmbedder module pre-configured
        """
        if not os.path.exists(config):
            raise FileNotFoundError
        cfg = load_json_file(config)
        return cls(word_vocab_size=word_vocab_size, num_labels=num_labels, **cfg)
    def load_embeddings(self, embeddings):
        """
        Load pre-defined word embeddings
        Args:
            embeddings (torch.tensor): word embedding tensor
        """
        # freeze=False keeps the pre-trained embeddings trainable
        self.word_embeddings = nn.Embedding.from_pretrained(
            embeddings, freeze=False, padding_idx=self.padding_idx
        )
| 13,627 | 37.497175 | 100 | py |
nlp-architect | nlp-architect-master/nlp_architect/nn/torch/layers/crf.py | # ******************************************************************************
# Copyright 2017-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
# Module adapted from https://github.com/kmkurn/pytorch-crf
from typing import List, Optional
import torch
import torch.nn as nn
class CRF(nn.Module):
    """Conditional random field.
    This module implements a conditional random field [LMP01]_. The forward computation
    of this class computes the log likelihood of the given sequence of tags and
    emission score tensor. This class also has `~CRF.decode` method which finds
    the best tag sequence given an emission score tensor using `Viterbi algorithm`_.
    Args:
        num_tags: Number of tags.
        batch_first: Whether the first dimension corresponds to the size of a minibatch.
    Attributes:
        start_transitions (`~torch.nn.Parameter`): Start transition score tensor of size
            ``(num_tags,)``.
        end_transitions (`~torch.nn.Parameter`): End transition score tensor of size
            ``(num_tags,)``.
        transitions (`~torch.nn.Parameter`): Transition score tensor of size
            ``(num_tags, num_tags)``.
    .. [LMP01] Lafferty, J., McCallum, A., Pereira, F. (2001).
        "Conditional random fields: Probabilistic models for segmenting and
        labeling sequence data". *Proc. 18th International Conf. on Machine
        Learning*. Morgan Kaufmann. pp. 282–289.
    .. _Viterbi algorithm: https://en.wikipedia.org/wiki/Viterbi_algorithm
    """
    def __init__(self, num_tags: int, batch_first: bool = False) -> None:
        if num_tags <= 0:
            raise ValueError(f"invalid number of tags: {num_tags}")
        super().__init__()
        self.num_tags = num_tags
        self.batch_first = batch_first
        self.start_transitions = nn.Parameter(torch.empty(num_tags))
        self.end_transitions = nn.Parameter(torch.empty(num_tags))
        self.transitions = nn.Parameter(torch.empty(num_tags, num_tags))
        self.reset_parameters()
    def reset_parameters(self) -> None:
        """Initialize the transition parameters.
        The parameters will be initialized randomly from a uniform distribution
        between -0.1 and 0.1.
        """
        nn.init.uniform_(self.start_transitions, -0.1, 0.1)
        nn.init.uniform_(self.end_transitions, -0.1, 0.1)
        nn.init.uniform_(self.transitions, -0.1, 0.1)
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(num_tags={self.num_tags})"
    def forward(
        self,
        emissions: torch.Tensor,
        tags: torch.LongTensor,
        mask: Optional[torch.ByteTensor] = None,
        reduction: str = "sum",
    ) -> torch.Tensor:
        """Compute the conditional log likelihood of a sequence of tags given emission scores.
        Args:
            emissions (`~torch.Tensor`): Emission score tensor of size
                ``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,
                ``(batch_size, seq_length, num_tags)`` otherwise.
            tags (`~torch.LongTensor`): Sequence of tags tensor of size
                ``(seq_length, batch_size)`` if ``batch_first`` is ``False``,
                ``(batch_size, seq_length)`` otherwise.
            mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``
                if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.
            reduction: Specifies the reduction to apply to the output:
                ``none|sum|mean|token_mean``. ``none``: no reduction will be applied.
                ``sum``: the output will be summed over batches. ``mean``: the output will be
                averaged over batches. ``token_mean``: the output will be averaged over tokens.
        Returns:
            `~torch.Tensor`: The log likelihood. This will have size ``(batch_size,)`` if
            reduction is ``none``, ``()`` otherwise.
        """
        self._validate(emissions, tags=tags, mask=mask)
        if reduction not in ("none", "sum", "mean", "token_mean"):
            raise ValueError(f"invalid reduction: {reduction}")
        if mask is None:
            mask = torch.ones_like(tags, dtype=torch.uint8)
        # internally everything is computed in time-first layout
        if self.batch_first:
            emissions = emissions.transpose(0, 1)
            tags = tags.transpose(0, 1)
            mask = mask.transpose(0, 1)
        # log-likelihood = score(tags) - log(partition)
        # shape: (batch_size,)
        numerator = self._compute_score(emissions, tags, mask)
        # shape: (batch_size,)
        denominator = self._compute_normalizer(emissions, mask)
        # shape: (batch_size,)
        llh = numerator - denominator
        if reduction == "none":
            return llh
        if reduction == "sum":
            return llh.sum()
        if reduction == "mean":
            return llh.mean()
        assert reduction == "token_mean"
        return llh.sum() / mask.float().sum()
    def decode(
        self, emissions: torch.Tensor, mask: Optional[torch.ByteTensor] = None
    ) -> List[List[int]]:
        """Find the most likely tag sequence using Viterbi algorithm.
        Args:
            emissions (`~torch.Tensor`): Emission score tensor of size
                ``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,
                ``(batch_size, seq_length, num_tags)`` otherwise.
            mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``
                if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.
        Returns:
            List of list containing the best tag sequence for each batch.
        """
        self._validate(emissions, mask=mask)
        if mask is None:
            mask = emissions.new_ones(emissions.shape[:2], dtype=torch.uint8)
        if self.batch_first:
            emissions = emissions.transpose(0, 1)
            mask = mask.transpose(0, 1)
        return self._viterbi_decode(emissions, mask)
    def _validate(
        self,
        emissions: torch.Tensor,
        tags: Optional[torch.LongTensor] = None,
        mask: Optional[torch.ByteTensor] = None,
    ) -> None:
        """Check dimensionality and shape agreement of emissions/tags/mask,
        raising ``ValueError`` with a descriptive message on mismatch."""
        if emissions.dim() != 3:
            raise ValueError(f"emissions must have dimension of 3, got {emissions.dim()}")
        if emissions.size(2) != self.num_tags:
            raise ValueError(
                f"expected last dimension of emissions is {self.num_tags}, "
                f"got {emissions.size(2)}"
            )
        if tags is not None:
            if emissions.shape[:2] != tags.shape:
                raise ValueError(
                    "the first two dimensions of emissions and tags must match, "
                    f"got {tuple(emissions.shape[:2])} and {tuple(tags.shape)}"
                )
        if mask is not None:
            if emissions.shape[:2] != mask.shape:
                raise ValueError(
                    "the first two dimensions of emissions and mask must match, "
                    f"got {tuple(emissions.shape[:2])} and {tuple(mask.shape)}"
                )
            # every sequence must contain at least its first timestep
            no_empty_seq = not self.batch_first and mask[0].all()
            no_empty_seq_bf = self.batch_first and mask[:, 0].all()
            if not no_empty_seq and not no_empty_seq_bf:
                raise ValueError("mask of the first timestep must all be on")
    def _compute_score(
        self, emissions: torch.Tensor, tags: torch.LongTensor, mask: torch.ByteTensor
    ) -> torch.Tensor:
        """Numerator of the log-likelihood: the unnormalized score of the
        given tag sequence (start + transition + emission + end scores)."""
        # emissions: (seq_length, batch_size, num_tags)
        # tags: (seq_length, batch_size)
        # mask: (seq_length, batch_size)
        assert emissions.dim() == 3 and tags.dim() == 2
        assert emissions.shape[:2] == tags.shape
        assert emissions.size(2) == self.num_tags
        assert mask.shape == tags.shape
        assert mask[0].all()
        seq_length, batch_size = tags.shape
        mask = mask.float()
        # Start transition score and first emission
        # shape: (batch_size,)
        score = self.start_transitions[tags[0]]
        score += emissions[0, torch.arange(batch_size), tags[0]]
        for i in range(1, seq_length):
            # Transition score to next tag, only added if next timestep is valid (mask == 1)
            # shape: (batch_size,)
            score += self.transitions[tags[i - 1], tags[i]] * mask[i]
            # Emission score for next tag, only added if next timestep is valid (mask == 1)
            # shape: (batch_size,)
            score += emissions[i, torch.arange(batch_size), tags[i]] * mask[i]
        # End transition score
        # shape: (batch_size,)
        seq_ends = mask.long().sum(dim=0) - 1
        # shape: (batch_size,)
        last_tags = tags[seq_ends, torch.arange(batch_size)]
        # shape: (batch_size,)
        score += self.end_transitions[last_tags]
        return score
    def _compute_normalizer(self, emissions: torch.Tensor, mask: torch.ByteTensor) -> torch.Tensor:
        """Denominator of the log-likelihood: the log partition function,
        computed with the forward algorithm in log space."""
        # emissions: (seq_length, batch_size, num_tags)
        # mask: (seq_length, batch_size)
        assert emissions.dim() == 3 and mask.dim() == 2
        assert emissions.shape[:2] == mask.shape
        assert emissions.size(2) == self.num_tags
        assert mask[0].all()
        seq_length = emissions.size(0)
        # Start transition score and first emission; score has size of
        # (batch_size, num_tags) where for each batch, the j-th column stores
        # the score that the first timestep has tag j
        # shape: (batch_size, num_tags)
        score = self.start_transitions + emissions[0]
        for i in range(1, seq_length):
            # Broadcast score for every possible next tag
            # shape: (batch_size, num_tags, 1)
            broadcast_score = score.unsqueeze(2)
            # Broadcast emission score for every possible current tag
            # shape: (batch_size, 1, num_tags)
            broadcast_emissions = emissions[i].unsqueeze(1)
            # Compute the score tensor of size (batch_size, num_tags, num_tags) where
            # for each sample, entry at row i and column j stores the sum of scores of all
            # possible tag sequences so far that end with transitioning from tag i to tag j
            # and emitting
            # shape: (batch_size, num_tags, num_tags)
            next_score = broadcast_score + self.transitions + broadcast_emissions
            # Sum over all possible current tags, but we're in score space, so a sum
            # becomes a log-sum-exp: for each sample, entry i stores the sum of scores of
            # all possible tag sequences so far, that end in tag i
            # shape: (batch_size, num_tags)
            next_score = torch.logsumexp(next_score, dim=1)
            # Set score to the next score if this timestep is valid (mask == 1)
            # shape: (batch_size, num_tags)
            score = torch.where(mask[i].unsqueeze(1), next_score, score)
        # End transition score
        # shape: (batch_size, num_tags)
        score += self.end_transitions
        # Sum (log-sum-exp) over all possible tags
        # shape: (batch_size,)
        return torch.logsumexp(score, dim=1)
    def _viterbi_decode(
        self, emissions: torch.FloatTensor, mask: torch.ByteTensor
    ) -> List[List[int]]:
        """Return the highest-scoring tag sequence for each batch element,
        found with the Viterbi algorithm (max-product forward pass followed
        by backtracking through the saved argmax history)."""
        # emissions: (seq_length, batch_size, num_tags)
        # mask: (seq_length, batch_size)
        assert emissions.dim() == 3 and mask.dim() == 2
        assert emissions.shape[:2] == mask.shape
        assert emissions.size(2) == self.num_tags
        assert mask[0].all()
        seq_length, batch_size = mask.shape
        # Start transition and first emission
        # shape: (batch_size, num_tags)
        score = self.start_transitions + emissions[0]
        history = []
        # score is a tensor of size (batch_size, num_tags) where for every batch,
        # value at column j stores the score of the best tag sequence so far that ends
        # with tag j
        # history saves where the best tags candidate transitioned from; this is used
        # when we trace back the best tag sequence
        # Viterbi algorithm recursive case: we compute the score of the best tag sequence
        # for every possible next tag
        for i in range(1, seq_length):
            # Broadcast viterbi score for every possible next tag
            # shape: (batch_size, num_tags, 1)
            broadcast_score = score.unsqueeze(2)
            # Broadcast emission score for every possible current tag
            # shape: (batch_size, 1, num_tags)
            broadcast_emission = emissions[i].unsqueeze(1)
            # Compute the score tensor of size (batch_size, num_tags, num_tags) where
            # for each sample, entry at row i and column j stores the score of the best
            # tag sequence so far that ends with transitioning from tag i to tag j and emitting
            # shape: (batch_size, num_tags, num_tags)
            next_score = broadcast_score + self.transitions + broadcast_emission
            # Find the maximum score over all possible current tag
            # shape: (batch_size, num_tags)
            next_score, indices = next_score.max(dim=1)
            # Set score to the next score if this timestep is valid (mask == 1)
            # and save the index that produces the next score
            # shape: (batch_size, num_tags)
            score = torch.where(mask[i].unsqueeze(1), next_score, score)
            history.append(indices)
        # End transition score
        # shape: (batch_size, num_tags)
        score += self.end_transitions
        # Now, compute the best path for each sample
        # shape: (batch_size,)
        seq_ends = mask.long().sum(dim=0) - 1
        best_tags_list = []
        for idx in range(batch_size):
            # Find the tag which maximizes the score at the last timestep; this is our best tag
            # for the last timestep
            _, best_last_tag = score[idx].max(dim=0)
            best_tags = [best_last_tag.item()]
            # We trace back where the best last tag comes from, append that to our best tag
            # sequence, and trace it back again, and so on
            for hist in reversed(history[: seq_ends[idx]]):
                best_last_tag = hist[idx][best_tags[-1]]
                best_tags.append(best_last_tag.item())
            # Reverse the order because we start from the last timestep
            best_tags.reverse()
            best_tags_list.append(best_tags)
        return best_tags_list
| 15,085 | 42.982507 | 99 | py |
nlp-architect | nlp-architect-master/nlp_architect/nn/torch/layers/__init__.py | # ******************************************************************************
# Copyright 2017-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
# flake8: noqa
from nlp_architect.nn.torch.layers.crf import CRF
| 813 | 44.222222 | 80 | py |
nlp-architect | nlp-architect-master/nlp_architect/nn/torch/data/dataset.py | # ******************************************************************************
# Copyright 2017-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import torch
from typing import List
class ParallelDataset(torch.utils.data.Dataset):
    """Zip-style dataset: item ``i`` is a tuple of item ``i`` from each
    wrapped dataset. The effective length is that of the shortest dataset."""

    def __init__(self, *datasets):
        self.datasets = datasets

    def __getitem__(self, i):
        items = []
        for ds in self.datasets:
            items.append(ds[i])
        return tuple(items)

    def __len__(self):
        lengths = [len(ds) for ds in self.datasets]
        return min(lengths)
class ConcatTensorDataset(torch.utils.data.Dataset):
    r"""Dataset formed by row-wise concatenation of TensorDatasets that all
    hold the same number of tensors.

    Samples are retrieved by indexing each concatenated tensor along its
    first dimension.

    Arguments:
        dataset (TensorDataset): base dataset the others are appended to.
        datasets (List[TensorDataset]): datasets appended to ``dataset``.
    """

    def __init__(
        self,
        dataset: torch.utils.data.TensorDataset,
        datasets: List[torch.utils.data.TensorDataset],
    ):
        tensors = dataset.tensors
        # fold each extra dataset in, concatenating tensor-by-tensor along dim 0
        for extra in datasets:
            tensors = [torch.cat((left, right), 0) for left, right in zip(tensors, extra.tensors)]
        # all tensors must agree on the number of rows
        assert all(tensors[0].size(0) == t.size(0) for t in tensors)
        self.tensors = tensors

    def __getitem__(self, index):
        row = []
        for t in self.tensors:
            row.append(t[index])
        return tuple(row)

    def __len__(self):
        return self.tensors[0].size(0)
class CombinedTensorDataset(torch.utils.data.Dataset):
    r"""Dataset as a concatenation of tensor datasets with different number of
    tensors (labeled dataset/ unlabeled dataset). Labels of unlabeled dataset will
    be represented as a tensor of zeros.
    Each sample will be retrieved by indexing tensors along the first dimension.
    Arguments:
        datasets (List[TensorDataset]): datasets to concat.
    """

    def __init__(self, datasets: List[torch.utils.data.TensorDataset]):
        max_ds_len = max(len(ds.tensors) for ds in datasets)
        # pad each dataset up to max_ds_len with zero "label" tensors, working on
        # copies so the caller's datasets are NOT mutated (the previous
        # implementation appended to ds.tensors in place)
        padded = []
        for ds in datasets:
            ds_tensors = list(ds.tensors)
            while len(ds_tensors) < max_ds_len:
                # zeros shaped like the first tensor; torch.long matches the
                # previous dtype=int behavior
                ds_tensors.append(torch.zeros(ds_tensors[0].shape, dtype=torch.long))
            padded.append(ds_tensors)
        # concatenate position-by-position along the sample dimension
        tensors = tuple(
            torch.cat([ds_tensors[i] for ds_tensors in padded], dim=0)
            for i in range(max_ds_len)
        )
        assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors)
        self.tensors = tensors

    def __getitem__(self, index):
        return tuple(tensor[index] for tensor in self.tensors)

    def __len__(self):
        return self.tensors[0].size(0)
| 3,336 | 35.67033 | 98 | py |
nlp-architect | nlp-architect-master/nlp_architect/nn/tensorflow/python/keras/callbacks.py | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import tensorflow as tf
from nlp_architect.utils.metrics import get_conll_scores
class ConllCallback(tf.keras.callbacks.Callback):
    """
    A Tensorflow(Keras) Conlleval evaluator.
    Runs the conlleval script for given x and y inputs.
    Prints Conlleval F1 score on the end of each epoch.
    Args:
        x: features matrix
        y: labels matrix
        y_vocab (dict): int-to-str labels lexicon
        batch_size (:obj:`int`, optional): batch size
    """

    def __init__(self, x, y, y_vocab, batch_size=1):
        super(ConllCallback, self).__init__()
        self.x = x
        self.y = y
        # invert the lexicon so predicted label ids map back to tag strings
        self.y_vocab = dict((idx, label) for label, idx in y_vocab.items())
        self.bsz = batch_size

    def on_epoch_end(self, epoch, logs=None):
        """Predict on the held-out data and print CoNLL evaluation statistics."""
        stats = get_conll_scores(
            self.model.predict(self.x, batch_size=self.bsz), self.y, self.y_vocab
        )
        print()
        print("Conll eval: \n{}".format(stats))
| 1,848 | 34.557692 | 80 | py |
nlp-architect | nlp-architect-master/nlp_architect/nn/tensorflow/python/keras/layers/crf.py | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import tensorflow as tf
class CRF(tf.keras.layers.Layer):
    """
    Conditional Random Field layer (tf.keras)
    `CRF` can be used as the last layer in a network (as a classifier). Input shape (features)
    must be equal to the number of classes the CRF can predict (a linear layer is recommended).
    Note: the loss and accuracy functions of networks using `CRF` must
    use the provided loss and accuracy functions (denoted as loss and viterbi_accuracy)
    as the classification of sequences are used with the layers internal weights.
    Args:
        num_labels (int): the number of labels to tag each temporal input.
    Input shape:
        nD tensor with shape `(batch_size, sentence length, num_classes)`.
    Output shape:
        nD tensor with shape: `(batch_size, sentence length, num_classes)`.
    """
    def __init__(self, num_classes, **kwargs):
        # transition weight matrix is created lazily in build()
        self.transitions = None
        super(CRF, self).__init__(**kwargs)
        # num of output labels
        self.output_dim = int(num_classes)
        self.input_spec = tf.keras.layers.InputSpec(min_ndim=3)
        self.supports_masking = False
        # per-example sequence lengths; populated in call()
        self.sequence_lengths = None
    def get_config(self):
        # serialize the learned transition weights together with layer config
        config = {
            "output_dim": self.output_dim,
            "supports_masking": self.supports_masking,
            "transitions": tf.keras.backend.eval(self.transitions),
        }
        base_config = super(CRF, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    def build(self, input_shape):
        # expects (batch, time, num_classes); the last axis must be known and
        # must match the number of output labels
        assert len(input_shape) == 3
        f_shape = tf.TensorShape(input_shape)
        input_spec = tf.keras.layers.InputSpec(min_ndim=3, axes={-1: f_shape[-1]})
        if f_shape[-1] is None:
            raise ValueError(
                "The last dimension of the inputs to `CRF` " "should be defined. Found `None`."
            )
        if f_shape[-1] != self.output_dim:
            raise ValueError(
                "The last dimension of the input shape must be equal to output"
                " shape. Use a linear layer if needed."
            )
        self.input_spec = input_spec
        # label-to-label transition scores, learned jointly with the network
        self.transitions = self.add_weight(
            name="transitions",
            shape=[self.output_dim, self.output_dim],
            initializer="glorot_uniform",
            trainable=True,
        )
        self.built = True
    # pylint: disable=arguments-differ
    def call(self, inputs, sequence_lengths=None, **kwargs):
        sequences = tf.convert_to_tensor(inputs, dtype=self.dtype)
        if sequence_lengths is not None:
            # expects a (batch, 1) int32 tensor of true lengths
            assert len(sequence_lengths.shape) == 2
            assert tf.convert_to_tensor(sequence_lengths).dtype == "int32"
            seq_len_shape = tf.convert_to_tensor(sequence_lengths).get_shape().as_list()
            assert seq_len_shape[1] == 1
            self.sequence_lengths = tf.keras.backend.flatten(sequence_lengths)
        else:
            # no explicit lengths given: assume full-length sequences
            self.sequence_lengths = tf.ones(tf.shape(inputs)[0], dtype=tf.int32) * (
                tf.shape(inputs)[1]
            )
        # Viterbi-decode the best tag sequence (tf.contrib => TF 1.x only)
        viterbi_sequence, _ = tf.contrib.crf.crf_decode(
            sequences, self.transitions, self.sequence_lengths
        )
        output = tf.keras.backend.one_hot(viterbi_sequence, self.output_dim)
        # training phase: pass the raw scores through (loss() consumes them);
        # inference phase: emit the one-hot Viterbi decoding
        return tf.keras.backend.in_train_phase(sequences, output)
    def loss(self, y_true, y_pred):
        # negative mean log-likelihood of the gold sequence under the CRF.
        # NOTE: crf_log_likelihood also returns the transition params, which
        # rebinds self.transitions here.
        y_pred = tf.convert_to_tensor(y_pred, dtype=self.dtype)
        log_likelihood, self.transitions = tf.contrib.crf.crf_log_likelihood(
            y_pred,
            tf.cast(tf.keras.backend.argmax(y_true), dtype=tf.int32),
            self.sequence_lengths,
            transition_params=self.transitions,
        )
        return tf.reduce_mean(-log_likelihood)
    def compute_output_shape(self, input_shape):
        # output keeps (batch, time) and replaces features with num labels
        tf.TensorShape(input_shape).assert_has_rank(3)
        return input_shape[:2] + (self.output_dim,)
    @property
    def viterbi_accuracy(self):
        # categorical accuracy measured on the Viterbi-decoded sequences
        # (assumes full-length sequences; no mask is applied here)
        def accuracy(y_true, y_pred):
            shape = tf.shape(y_pred)
            sequence_lengths = tf.ones(shape[0], dtype=tf.int32) * (shape[1])
            viterbi_sequence, _ = tf.contrib.crf.crf_decode(
                y_pred, self.transitions, sequence_lengths
            )
            output = tf.keras.backend.one_hot(viterbi_sequence, self.output_dim)
            return tf.keras.metrics.categorical_accuracy(y_true, output)
        accuracy.func_name = "viterbi_accuracy"
        return accuracy
| 5,223 | 40.133858 | 95 | py |
nlp-architect | nlp-architect-master/nlp_architect/nn/tensorflow/python/keras/utils/layer_utils.py | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import pickle
import tempfile
from tensorflow import keras
def save_model(model: keras.models.Model, topology: dict, filepath: str) -> None:
    """Serialize a tf.keras model's weights plus its topology into one file.

    The weights are written through a temporary HDF5 file and bundled,
    together with the given topology dictionary, into a single pickle.

    Args:
        model: model object
        topology (dict): a dictionary of topology elements and their values
        filepath (str): path to save model
    """
    with tempfile.NamedTemporaryFile(suffix=".h5", delete=True) as tmp_file:
        # let keras write its HDF5 weights into the temp file, then read the raw bytes
        model.save_weights(tmp_file.name)
        model_weights = tmp_file.read()
    payload = {"model_weights": model_weights, "model_topology": topology}
    with open(filepath, "wb") as out_fp:
        pickle.dump(payload, out_fp)
def load_model(filepath, model) -> None:
    """Restore a tf.keras model saved by :func:`save_model`.

    Rebuilds the model topology from the pickled values and then loads the
    stored HDF5 weights into it.

    Args:
        filepath (str): path to model
        model: model object to load
    """
    # NOTE: unpickling is only safe on trusted model files
    with open(filepath, "rb") as in_fp:
        saved = pickle.load(in_fp)
    # re-create the network using the stored topology arguments
    model.build(**saved["model_topology"])
    # round-trip the weight bytes through a temporary .h5 file for keras
    with tempfile.NamedTemporaryFile(suffix=".h5", delete=True) as tmp_file:
        tmp_file.write(saved["model_weights"])
        tmp_file.flush()
        model.model.load_weights(tmp_file.name)
| 2,012 | 35.6 | 81 | py |
nlp-architect | nlp-architect-master/nlp_architect/nn/tensorflow/python/keras/utils/__init__.py | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
# flake8: noqa
from nlp_architect.nn.tensorflow.python.keras.utils.layer_utils import save_model, load_model
| 857 | 46.666667 | 93 | py |
nlp-architect | nlp-architect-master/nlp_architect/models/intent_extraction.py | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from nlp_architect.nn.tensorflow.python.keras.layers.crf import CRF
from nlp_architect.nn.tensorflow.python.keras.utils import load_model, save_model
class IntentExtractionModel(object):
    """
    Intent Extraction model base class (using tf.keras)
    """
    def __init__(self):
        # the concrete tf.keras.Model is created by a subclass' build()
        self.model = None
    def fit(self, x, y, epochs=1, batch_size=1, callbacks=None, validation=None):
        """
        Train a model given input samples and target labels.
        Args:
            x: input samples
            y: input sample labels
            epochs (:obj:`int`, optional): number of epochs to train
            batch_size (:obj:`int`, optional): batch size
            callbacks(:obj:`Callback`, optional): Keras compatible callbacks
            validation(:obj:`list` of :obj:`numpy.ndarray`, optional): optional validation data
                to be evaluated when training
        """
        assert self.model, "Model was not initialized"
        self.model.fit(
            x,
            y,
            epochs=epochs,
            batch_size=batch_size,
            shuffle=True,
            validation_data=validation,
            callbacks=callbacks,
        )
    def predict(self, x, batch_size=1):
        """
        Get the prediction of the model on given input
        Args:
            x: samples to run through the model
            batch_size (:obj:`int`, optional): batch size:
        Returns:
            numpy.ndarray: predicted values by the model
        """
        assert self.model, "Model was not initialized"
        return self.model.predict(x, batch_size=batch_size)
    def save(self, path, exclude=None):
        """
        Save model to path
        Args:
            path (str): path to save model
            exclude (list, optional): a list of object fields to exclude when saving
        """
        assert self.model, "Model was not initialized"
        # topology = every instance field except the keras model itself
        # (the weights are serialized separately by save_model)
        topology = {k: v for k, v in self.__dict__.items()}
        topology.pop("model")
        if exclude and isinstance(exclude, list):
            for x in exclude:
                topology.pop(x)
        save_model(self.model, topology=topology, filepath=path)
    def load(self, path):
        """
        Load a trained model
        Args:
            path (str): path to model file
        """
        load_model(path, self)
    @property
    def input_shape(self):
        """:obj:`tuple`:Get input shape"""
        return self.model.layers[0].input_shape
    @staticmethod
    def _create_input_embed(sentence_len, is_extern_emb, token_emb_size, vocab_size):
        # Build the token input (and embedding) layers: with external
        # embeddings the input already carries dense vectors, otherwise an
        # Embedding layer maps int token ids to vectors.
        if is_extern_emb:
            in_layer = e_layer = tf.keras.layers.Input(
                shape=(
                    sentence_len,
                    token_emb_size,
                ),
                dtype="float32",
                name="tokens_input",
            )
        else:
            in_layer = tf.keras.layers.Input(
                shape=(sentence_len,), dtype="int32", name="tokens_input"
            )
            e_layer = tf.keras.layers.Embedding(
                vocab_size, token_emb_size, input_length=sentence_len, name="embedding_layer"
            )(in_layer)
        return in_layer, e_layer
    def load_embedding_weights(self, weights):
        """
        Load word embedding weights into the model embedding layer
        Args:
            weights (numpy.ndarray): 2D matrix of word weights
        """
        assert self.model is not None, (
            "Cannot assign weights, apply build() before trying to " "loading embedding weights "
        )
        # NOTE(review): looks up a layer named "word_embedding" (the name used
        # by the subclasses' build() methods); _create_input_embed above names
        # its embedding "embedding_layer" instead -- confirm the intended name
        # when using that helper.
        emb_layer = self.model.get_layer(name="word_embedding")
        assert emb_layer.output_dim == weights.shape[1], "embedding vectors shape mismatch"
        emb_layer.set_weights([weights])
class MultiTaskIntentModel(IntentExtractionModel):
    """
    Multi-Task Intent and Slot tagging model (using tf.keras)
    Args:
        use_cudnn (bool, optional): use GPU based model (CUDNNA cells)
    """
    def __init__(self, use_cudnn=False):
        super().__init__()
        # hyper-parameters are stored by build() so they round-trip via save()
        self.model = None
        self.word_length = None
        self.num_labels = None
        self.num_intent_labels = None
        self.word_vocab_size = None
        self.char_vocab_size = None
        self.word_emb_dims = None
        self.char_emb_dims = None
        self.char_lstm_dims = None
        self.tagger_lstm_dims = None
        self.dropout = None
        self.use_cudnn = use_cudnn
    def build(
        self,
        word_length,
        num_labels,
        num_intent_labels,
        word_vocab_size,
        char_vocab_size,
        word_emb_dims=100,
        char_emb_dims=30,
        char_lstm_dims=30,
        tagger_lstm_dims=100,
        dropout=0.2,
    ):
        """
        Build a model
        Args:
            word_length (int): max word length (in characters)
            num_labels (int): number of slot labels
            num_intent_labels (int): number of intent classes
            word_vocab_size (int): word vocabulary size
            char_vocab_size (int): character vocabulary size
            word_emb_dims (int, optional): word embedding dimensions
            char_emb_dims (int, optional): character embedding dimensions
            char_lstm_dims (int, optional): character feature LSTM hidden size
            tagger_lstm_dims (int, optional): tagger LSTM hidden size
            dropout (float, optional): dropout rate
        """
        self.word_length = word_length
        self.num_labels = num_labels
        self.num_intent_labels = num_intent_labels
        self.word_vocab_size = word_vocab_size
        self.char_vocab_size = char_vocab_size
        self.word_emb_dims = word_emb_dims
        self.char_emb_dims = char_emb_dims
        self.char_lstm_dims = char_lstm_dims
        self.tagger_lstm_dims = tagger_lstm_dims
        self.dropout = dropout
        # word-level input and embeddings
        words_input = tf.keras.layers.Input(shape=(None,), name="words_input")
        embedding_layer = tf.keras.layers.Embedding(
            self.word_vocab_size, self.word_emb_dims, name="word_embedding"
        )
        word_embeddings = embedding_layer(words_input)
        word_embeddings = tf.keras.layers.Dropout(self.dropout)(word_embeddings)
        # create word character input and embeddings layer
        word_chars_input = tf.keras.layers.Input(
            shape=(None, self.word_length), name="word_chars_input"
        )
        char_embedding_layer = tf.keras.layers.Embedding(
            self.char_vocab_size,
            self.char_emb_dims,
            input_length=self.word_length,
            name="char_embedding",
        )
        # apply embedding to each word
        char_embeddings = char_embedding_layer(word_chars_input)
        # feed dense char vectors into BiLSTM
        char_embeddings = tf.keras.layers.TimeDistributed(
            tf.keras.layers.Bidirectional(self._rnn_cell(self.char_lstm_dims))
        )(char_embeddings)
        char_embeddings = tf.keras.layers.Dropout(self.dropout)(char_embeddings)
        # first BiLSTM layer (used for intent classification)
        first_bilstm_layer = tf.keras.layers.Bidirectional(
            self._rnn_cell(self.tagger_lstm_dims, return_sequences=True, return_state=True)
        )
        first_lstm_out = first_bilstm_layer(word_embeddings)
        lstm_y_sequence = first_lstm_out[:1][0] # save y states of the LSTM layer
        states = first_lstm_out[1:]
        hf, _, hb, _ = states # extract last hidden states
        # intent head: classify from the concatenated fwd/bwd hidden states
        h_state = tf.keras.layers.concatenate([hf, hb], axis=-1)
        intents = tf.keras.layers.Dense(
            self.num_intent_labels, activation="softmax", name="intent_classifier_output"
        )(h_state)
        # create the 2nd feature vectors
        combined_features = tf.keras.layers.concatenate([lstm_y_sequence, char_embeddings], axis=-1)
        # 2nd BiLSTM layer for label classification
        second_bilstm_layer = tf.keras.layers.Bidirectional(
            self._rnn_cell(self.tagger_lstm_dims, return_sequences=True)
        )(combined_features)
        second_bilstm_layer = tf.keras.layers.Dropout(self.dropout)(second_bilstm_layer)
        bilstm_out = tf.keras.layers.Dense(self.num_labels)(second_bilstm_layer)
        # feed BiLSTM vectors into CRF
        # (pinned to CPU: the CRF layer runs its decode ops on CPU)
        with tf.device("/cpu:0"):
            crf = CRF(self.num_labels, name="intent_slot_crf")
            labels = crf(bilstm_out)
        # compile the model
        model = tf.keras.Model(inputs=[words_input, word_chars_input], outputs=[intents, labels])
        # define losses and metrics
        loss_f = {
            "intent_classifier_output": "categorical_crossentropy",
            "intent_slot_crf": crf.loss,
        }
        metrics = {
            "intent_classifier_output": "categorical_accuracy",
            "intent_slot_crf": crf.viterbi_accuracy,
        }
        model.compile(loss=loss_f, optimizer=tf.train.AdamOptimizer(), metrics=metrics)
        self.model = model
    def _rnn_cell(self, units, **kwargs):
        # choose the GPU-optimized cell only when explicitly requested
        if self.use_cudnn:
            rnn_cell = tf.keras.layers.CuDNNLSTM(units, **kwargs)
        else:
            rnn_cell = tf.keras.layers.LSTM(units, **kwargs)
        return rnn_cell
    # pylint: disable=arguments-differ
    def save(self, path):
        """
        Save model to path
        Args:
            path (str): path to save model
        """
        # exclude use_cudnn so the saved topology stays hardware-agnostic
        super().save(path, ["use_cudnn"])
class Seq2SeqIntentModel(IntentExtractionModel):
    """
    Encoder Decoder Deep LSTM Tagger Model (using tf.keras)
    """
    def __init__(self):
        super().__init__()
        # hyper-parameters are stored by build() so they round-trip via save()
        self.model = None
        self.vocab_size = None
        self.tag_labels = None
        self.token_emb_size = None
        self.encoder_depth = None
        self.decoder_depth = None
        self.lstm_hidden_size = None
        self.encoder_dropout = None
        self.decoder_dropout = None
    def build(
        self,
        vocab_size,
        tag_labels,
        token_emb_size=100,
        encoder_depth=1,
        decoder_depth=1,
        lstm_hidden_size=100,
        encoder_dropout=0.5,
        decoder_dropout=0.5,
    ):
        """
        Build the model
        Args:
            vocab_size (int): vocabulary size
            tag_labels (int): number of tag labels
            token_emb_size (int, optional): token embedding vector size
            encoder_depth (int, optional): number of encoder LSTM layers
            decoder_depth (int, optional): number of decoder LSTM layers
            lstm_hidden_size (int, optional): LSTM layers hidden size
            encoder_dropout (float, optional): encoder dropout
            decoder_dropout (float, optional): decoder dropout
        """
        self.vocab_size = vocab_size
        self.tag_labels = tag_labels
        self.token_emb_size = token_emb_size
        self.encoder_depth = encoder_depth
        self.decoder_depth = decoder_depth
        self.lstm_hidden_size = lstm_hidden_size
        self.encoder_dropout = encoder_dropout
        self.decoder_dropout = decoder_dropout
        words_input = tf.keras.layers.Input(shape=(None,), name="words_input")
        emb_layer = tf.keras.layers.Embedding(
            self.vocab_size, self.token_emb_size, name="word_embedding"
        )
        benc_in = emb_layer(words_input)
        assert self.encoder_depth > 0, "Encoder depth must be > 0"
        # stacked backwards-running encoder LSTMs; each layer consumes the
        # previous layer's output sequence
        for i in range(self.encoder_depth):
            bencoder = tf.keras.layers.LSTM(
                self.lstm_hidden_size,
                return_sequences=True,
                return_state=True,
                go_backwards=True,
                dropout=self.encoder_dropout,
                name="encoder_blstm_{}".format(i),
            )(benc_in)
            benc_in = bencoder[0]
        # hidden/cell states of the *last* encoder layer seed the decoder
        b_states = bencoder[1:]
        benc_h, bene_c = b_states
        decoder_inputs = benc_in
        assert self.decoder_depth > 0, "Decoder depth must be > 0"
        # NOTE(review): every decoder layer is initialized with the same
        # last-encoder states -- confirm this stacking scheme is intended
        for i in range(self.decoder_depth):
            decoder = tf.keras.layers.LSTM(
                self.lstm_hidden_size, return_sequences=True, name="decoder_lstm_{}".format(i)
            )(decoder_inputs, initial_state=[benc_h, bene_c])
            decoder_inputs = decoder
        # dropout is applied to the final decoder output only
        decoder_outputs = tf.keras.layers.Dropout(self.decoder_dropout)(decoder)
        decoder_predictions = tf.keras.layers.TimeDistributed(
            tf.keras.layers.Dense(self.tag_labels, activation="softmax"), name="decoder_classifier"
        )(decoder_outputs)
        self.model = tf.keras.Model(words_input, decoder_predictions)
        self.model.compile(
            optimizer=tf.train.AdamOptimizer(),
            loss="categorical_crossentropy",
            metrics=["categorical_accuracy"],
        )
| 13,602 | 35.469169 | 100 | py |
nlp-architect | nlp-architect-master/nlp_architect/models/tagging.py | # ******************************************************************************
# Copyright 2017-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import io
import logging
import os
import pickle
from typing import List
import numpy as np
import torch
import torch.optim as optim
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from nlp_architect.data.sequential_tagging import TokenClsInputExample
from nlp_architect.models import TrainableModel
from nlp_architect.nn.torch.layers import CRF
from nlp_architect.nn.torch.distillation import TeacherStudentDistill
from nlp_architect.nn.torch.modules.embedders import IDCNN
from nlp_architect.utils.metrics import tagging
from nlp_architect.utils.text import Vocabulary, char_to_id
logger = logging.getLogger(__name__)
class NeuralTagger(TrainableModel):
"""
Simple neural tagging model
Supports pytorch embedder models, multi-gpu training, KD from teacher models
Args:
embedder_model: pytorch embedder model (valid nn.Module model)
word_vocab (Vocabulary): word vocabulary
labels (List, optional): list of labels. Defaults to None
use_crf (bool, optional): use CRF a the classifier (instead of Softmax). Defaults to False.
device (str, optional): device backend. Defatuls to 'cpu'.
n_gpus (int, optional): number of gpus. Default to 0.
"""
def __init__(
self,
embedder_model,
word_vocab: Vocabulary,
labels: List[str] = None,
use_crf: bool = False,
device: str = "cpu",
n_gpus=0,
):
super(NeuralTagger, self).__init__()
self.model = embedder_model
self.labels = labels
self.num_labels = len(labels) + 1 # +1 for padding
self.label_str_id = {l: i for i, l in enumerate(self.labels, 1)}
self.label_id_str = {v: k for k, v in self.label_str_id.items()}
self.word_vocab = word_vocab
self.use_crf = use_crf
if self.use_crf:
self.crf = CRF(self.num_labels, batch_first=True)
self.device = device
self.n_gpus = n_gpus
self.to(self.device, self.n_gpus)
    def convert_to_tensors(
        self,
        examples: List[TokenClsInputExample],
        max_seq_length: int = 128,
        max_word_length: int = 12,
        pad_id: int = 0,
        labels_pad_id: int = 0,
        include_labels: bool = True,
    ) -> TensorDataset:
        """
        Convert examples to valid tagger dataset
        Args:
            examples (List[TokenClsInputExample]): List of examples
            max_seq_length (int, optional): max words per sentence. Defaults to 128.
            max_word_length (int, optional): max characters in a word. Defaults to 12.
            pad_id (int, optional): padding int id. Defaults to 0.
            labels_pad_id (int, optional): labels padding id. Defaults to 0.
            include_labels (bool, optional): include labels in dataset. Defaults to True.
        Returns:
            TensorDataset: TensorDataset for given examples
        """
        features = []
        for example in examples:
            # word ids from the vocabulary, label ids (1-based; 0 = padding)
            word_tokens = [self.word_vocab[t] for t in example.tokens]
            labels = []
            if include_labels:
                labels = [self.label_str_id.get(label) for label in example.label]
            # per-word character id sequences
            word_chars = []
            for word in example.tokens:
                word_chars.append([char_to_id(c) for c in word])
            word_shapes = example.shapes
            # cut up to max length
            word_tokens = word_tokens[:max_seq_length]
            word_shapes = word_shapes[:max_seq_length]
            if include_labels:
                labels = labels[:max_seq_length]
            word_chars = word_chars[:max_seq_length]
            for i in range(len(word_chars)):
                word_chars[i] = word_chars[i][:max_word_length]
            # attention-style mask: 1 for real tokens, 0 for padding
            mask = [1] * len(word_tokens)
            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(word_tokens)
            input_ids = word_tokens + ([pad_id] * padding_length)
            shape_ids = word_shapes + ([pad_id] * padding_length)
            mask = mask + ([0] * padding_length)
            if include_labels:
                label_ids = labels + ([labels_pad_id] * padding_length)
            word_char_ids = []
            # pad word vectors
            for i in range(len(word_chars)):
                word_char_ids.append(
                    word_chars[i] + ([pad_id] * (max_word_length - len(word_chars[i])))
                )
            # pad word vectors with remaining zero vectors
            for _ in range(padding_length):
                word_char_ids.append(([pad_id] * max_word_length))
            # sanity checks: all features are exactly max_seq_length long
            assert len(input_ids) == max_seq_length
            assert len(shape_ids) == max_seq_length
            if include_labels:
                assert len(label_ids) == max_seq_length
            assert len(word_char_ids) == max_seq_length
            for i in range(len(word_char_ids)):
                assert len(word_char_ids[i]) == max_word_length
            features.append(
                InputFeatures(
                    input_ids,
                    word_char_ids,
                    shape_ids,
                    mask=mask,
                    label_id=label_ids if include_labels else None,
                )
            )
        # Convert to Tensors and build dataset
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_char_ids = torch.tensor([f.char_ids for f in features], dtype=torch.long)
        all_shape_ids = torch.tensor([f.shape_ids for f in features], dtype=torch.long)
        masks = torch.tensor([f.mask for f in features], dtype=torch.long)
        # the is_labeled flag lets train() mix labeled and unlabeled batches
        if include_labels:
            is_labeled = torch.tensor([True for f in features], dtype=torch.bool)
            all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
            dataset = TensorDataset(
                all_input_ids, all_char_ids, all_shape_ids, masks, is_labeled, all_label_ids
            )
        else:
            is_labeled = torch.tensor([False for f in features], dtype=torch.bool)
            dataset = TensorDataset(all_input_ids, all_char_ids, all_shape_ids, masks, is_labeled)
        return dataset
def get_optimizer(self, opt_fn=None, lr: int = 0.001):
"""
Get default optimizer
Args:
lr (int, optional): learning rate. Defaults to 0.001.
Returns:
torch.optim.Optimizer: optimizer
"""
params = self.model.parameters()
if self.use_crf:
params = list(params) + list(self.crf.parameters())
if opt_fn is None:
opt_fn = optim.Adam
return opt_fn(params, lr=lr)
@staticmethod
def batch_mapper(batch):
"""
Map batch to correct input names
"""
mapping = {
"words": batch[0],
"word_chars": batch[1],
"shapes": batch[2],
"mask": batch[3],
"is_labeled": batch[4],
}
if len(batch) == 6:
mapping.update({"labels": batch[5]})
return mapping
    def train(
        self,
        train_data_set: DataLoader,
        dev_data_set: DataLoader = None,
        test_data_set: DataLoader = None,
        epochs: int = 3,
        batch_size: int = 8,
        optimizer=None,
        max_grad_norm: float = 5.0,
        logging_steps: int = 50,
        save_steps: int = 100,
        save_path: str = None,
        distiller: TeacherStudentDistill = None,
        best_result_file: str = None,
        word_dropout: float = 0,
    ):
        """
        Train a tagging model
        Args:
            train_data_set (DataLoader): train examples dataloader.
                - If distiller object is provided train examples should contain a tuple of
                  student/teacher data examples.
            dev_data_set (DataLoader, optional): dev examples dataloader. Defaults to None.
            test_data_set (DataLoader, optional): test examples dataloader. Defaults to None.
            epochs (int, optional): num of epochs to train. Defaults to 3.
            batch_size (int, optional): batch size. Defaults to 8.
            optimizer (fn, optional): optimizer function. Defaults to default model optimizer.
            max_grad_norm (float, optional): max gradient norm. Defaults to 5.0.
            logging_steps (int, optional): number of steps between logging. Defaults to 50.
            save_steps (int, optional): number of steps between model saves. Defaults to 100.
            save_path (str, optional): model output path. Defaults to None.
            distiller (TeacherStudentDistill, optional): KD model for training the model using
                a teacher model. Defaults to None.
            best_result_file (str, optional): path to save best dev results when it's updated.
            word_dropout (float, optional): whole-word (-> oov) dropout rate. Defaults to 0.
        """
        if optimizer is None:
            optimizer = self.get_optimizer()
        train_batch_size = batch_size * max(1, self.n_gpus)
        logger.info("***** Running training *****")
        logger.info(" Num examples = %d", len(train_data_set.dataset))
        logger.info(" Num Epochs = %d", epochs)
        logger.info(" Instantaneous batch size per GPU/CPU = %d", batch_size)
        logger.info(" Total batch size = %d", train_batch_size)
        global_step = 0
        best_dev = 0
        dev_test = 0
        self.model.zero_grad()
        epoch_it = trange(epochs, desc="Epoch")
        for epoch in epoch_it:
            step_it = tqdm(train_data_set, desc="Train iteration")
            avg_loss = 0
            for step, batches in enumerate(step_it):
                self.model.train()
                # with a distiller, each item is (student_batch, teacher_batch)
                batch, t_batch = (batches, []) if not distiller else (batches[:2])
                batch = tuple(t.to(self.device) for t in batch)
                inputs = self.batch_mapper(batch)
                logits = self.model(**inputs)
                if distiller:
                    # knowledge distillation: get teacher logits, keep only
                    # non-padding positions, and pseudo-label unlabeled examples
                    t_batch = tuple(t.to(self.device) for t in t_batch)
                    t_logits = distiller.get_teacher_logits(t_batch)
                    valid_positions = (
                        t_batch[3] != 0.0
                    ) # TODO: implement method to get only valid logits from the model itself
                    valid_t_logits = {}
                    max_seq_len = logits.shape[1]
                    for i in range(len(logits)): # each example in batch
                        valid_logit_i = t_logits[i][valid_positions[i]]
                        valid_t_logits[i] = (
                            valid_logit_i
                            if valid_logit_i.shape[0] <= max_seq_len
                            else valid_logit_i[:][:max_seq_len]
                        ) # cut to max len
                    # prepare teacher labels for non-labeled examples
                    t_labels_dict = {}
                    for i in range(len(valid_t_logits.keys())):
                        t_labels_dict[i] = torch.argmax(
                            F.log_softmax(valid_t_logits[i], dim=-1), dim=-1
                        )
                    # pseudo labeling
                    for i, is_labeled in enumerate(inputs["is_labeled"]):
                        if not is_labeled:
                            t_labels_i = t_labels_dict[i]
                            # add the padded teacher label:
                            inputs["labels"][i] = torch.cat(
                                (
                                    t_labels_i,
                                    torch.zeros([max_seq_len - len(t_labels_i)], dtype=torch.long).to(
                                        self.device
                                    ),
                                ),
                                0,
                            )
                # apply word dropout to the input
                # NOTE(review): tokens are replaced by OOV when
                # word_probs > word_dropout, i.e. with probability
                # (1 - word_dropout) -- verify the intended dropout direction
                if word_dropout != 0:
                    tokens = inputs["words"]
                    tokens = np.array(tokens.detach().cpu())
                    word_probs = np.random.random(tokens.shape)
                    drop_indices = np.where(
                        (word_probs > word_dropout) & (tokens != 0)
                    ) # ignore padding indices
                    inputs["words"][drop_indices[0], drop_indices[1]] = self.word_vocab.oov_id
                # loss
                if self.use_crf:
                    # CRF returns log-likelihood; negate for a minimizable loss
                    loss = -1.0 * self.crf(logits, inputs["labels"], mask=inputs["mask"] != 0.0)
                else:
                    loss_fn = CrossEntropyLoss(ignore_index=0)
                    loss = loss_fn(logits.view(-1, self.num_labels), inputs["labels"].view(-1))
                # for idcnn training - add dropout penalty loss
                module = self.model.module if self.n_gpus > 1 else self.model
                if isinstance(module, IDCNN) and module.drop_penalty != 0:
                    # penalize divergence between dropout and no-dropout logits
                    logits_no_drop = self.model(**inputs, no_dropout=True)
                    sub = logits.sub(logits_no_drop)
                    drop_loss = torch.div(torch.sum(torch.pow(sub, 2)), 2)
                    loss += module.drop_penalty * drop_loss
                if self.n_gpus > 1:
                    loss = loss.mean()
                # add distillation loss if activated
                if distiller:
                    # filter masked student logits (no padding)
                    valid_s_logits = {}
                    valid_s_positions = inputs["mask"] != 0.0
                    for i in range(len(logits)):
                        valid_s_logit_i = logits[i][valid_s_positions[i]]
                        valid_s_logits[i] = valid_s_logit_i
                    loss = distiller.distill_loss_dict(loss, valid_s_logits, valid_t_logits)
                loss.backward()
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_grad_norm)
                optimizer.step()
                optimizer.zero_grad()
                global_step += 1
                avg_loss += loss.item()
                # periodic logging + best-model tracking on the dev set
                if global_step % logging_steps == 0:
                    if step != 0:
                        logger.info(
                            " global_step = %s, average loss = %s", global_step, avg_loss / step
                        )
                        best_dev, dev_test = self.update_best_model(
                            dev_data_set,
                            test_data_set,
                            best_dev,
                            dev_test,
                            best_result_file,
                            avg_loss / step,
                            epoch,
                            save_path=None,
                        )
                if save_steps != 0 and save_path is not None and global_step % save_steps == 0:
                    self.save_model(save_path)
        # final evaluation + save of the best model at the end of training
        self.update_best_model(
            dev_data_set,
            test_data_set,
            best_dev,
            dev_test,
            best_result_file,
            "end_training",
            "end_training",
            save_path=save_path + "/best_dev",
        )
def _get_eval(self, ds, set_name):
if ds is not None:
logits, out_label_ids = self.evaluate(ds)
res = self.evaluate_predictions(logits, out_label_ids)
logger.info(" {} set F1 = {}".format(set_name, res["f1"]))
return res["f1"]
return None
def to(self, device="cpu", n_gpus=0):
"""
Put model on given device
Args:
device (str, optional): device backend. Defaults to 'cpu'.
n_gpus (int, optional): number of gpus. Defaults to 0.
"""
if self.model is not None:
self.model.to(device)
if self.use_crf:
self.crf.to(device)
if n_gpus > 1:
self.model = torch.nn.DataParallel(self.model)
if self.use_crf:
self.crf = torch.nn.DataParallel(self.crf)
self.device = device
self.n_gpus = n_gpus
def evaluate(self, data_set: DataLoader):
"""
Run evaluation on given dataloader
Args:
data_set (DataLoader): a data loader to run evaluation on
Returns:
logits, labels (if labels are given)
"""
logger.info("***** Running inference *****")
logger.info(" Batch size: {}".format(data_set.batch_size))
preds = None
out_label_ids = None
for batch in tqdm(data_set, desc="Inference iteration"):
self.model.eval()
batch = tuple(t.to(self.device) for t in batch)
with torch.no_grad():
inputs = self.batch_mapper(batch)
logits = self.model(**inputs)
model_output = logits.detach().cpu()
model_out_label_ids = inputs["labels"].detach().cpu() if "labels" in inputs else None
if preds is None:
preds = model_output
out_label_ids = model_out_label_ids
else:
preds = torch.cat((preds, model_output), dim=0)
out_label_ids = (
torch.cat((out_label_ids, model_out_label_ids), dim=0)
if out_label_ids is not None
else None
)
output = (preds,)
if out_label_ids is not None:
output = output + (out_label_ids,)
return output
    def evaluate_predictions(self, logits, label_ids):
        """
        Evaluate given logits on truth labels
        Args:
            logits: logits of model
            label_ids: truth label ids
        Returns:
            dict: dictionary containing P/R/F1 metrics
        """
        # label id 0 is padding -- keep only real token positions
        active_positions = label_ids.view(-1) != 0.0
        active_labels = label_ids.view(-1)[active_positions]
        if self.use_crf:
            logits_shape = logits.size()
            decode_ap = active_positions.view(logits_shape[0], logits_shape[1]) != 0.0
            if self.n_gpus > 1:
                # DataParallel wraps the CRF; reach through .module for decode
                decode_fn = self.crf.module.decode
            else:
                decode_fn = self.crf.decode
            logits = decode_fn(logits.to(self.device), mask=decode_ap.to(self.device))
            # decode() yields a list of per-sequence tag-id lists; flatten it
            logits = [lll for ll in logits for lll in ll]
        else:
            # softmax path: argmax over the label dimension at active positions
            active_logits = logits.view(-1, len(self.label_id_str) + 1)[active_positions]
            logits = torch.argmax(F.log_softmax(active_logits, dim=1), dim=1)
            logits = logits.detach().cpu().numpy()
        out_label_ids = active_labels.detach().cpu().numpy()
        y_true, y_pred = self.extract_labels(out_label_ids, logits)
        p, r, f1 = tagging(y_pred, y_true)
        return {"p": p, "r": r, "f1": f1}
def update_best_model(
    self,
    dev_data_set,
    test_data_set,
    best_dev,
    best_dev_test,
    best_result_file,
    loss,
    epoch,
    save_path=None,
):
    """Evaluate on dev/test and track the best dev score seen so far.

    Args:
        dev_data_set: dev-set loader; its score decides "best".
        test_data_set: test-set loader, reported alongside the dev score.
        best_dev: best dev metric observed so far.
        best_dev_test: test metric recorded when best_dev was set.
        best_result_file: optional path appended to whenever dev improves.
        loss: current training loss, logged to the result file.
        epoch: current epoch number, logged to the result file.
        save_path: optional directory to save the model into.

    Returns:
        tuple: (new best dev metric, test metric at that point)
    """
    new_best_dev = best_dev
    new_test = best_dev_test
    dev = self._get_eval(dev_data_set, "dev")
    test = self._get_eval(test_data_set, "test")
    if dev > best_dev:
        new_best_dev = dev
        new_test = test
        if best_result_file is not None:
            with open(best_result_file, "a+") as f:
                f.write(
                    "best dev= "
                    + str(new_best_dev)
                    + ", test= "
                    + str(new_test)
                    + ", loss= "
                    + str(loss)
                    + ", epoch= "
                    + str(epoch)
                    + "\n"
                )
    logger.info("Best result: Dev=%s, Test=%s", str(new_best_dev), str(new_test))
    # NOTE(review): the model is saved on every call, even when dev did not
    # improve — confirm this "save latest" behavior is intended, otherwise the
    # best checkpoint can be overwritten by a worse one.
    if save_path is not None:
        self.save_model(save_path)
    return new_best_dev, new_test
def extract_labels(self, label_ids, logits):
    """Map predicted and gold label ids to their string tags.

    Ids missing from ``self.label_id_str`` fall back to the "O" tag.
    Pairs are formed with ``zip``, so extra trailing ids in the longer
    of the two inputs are dropped.

    Args:
        label_ids: iterable of gold label ids.
        logits: iterable of predicted label ids.

    Returns:
        tuple: (y_true, y_pred) lists of tag strings, equal length.
    """
    label_map = self.label_id_str
    pairs = [
        (label_map.get(pred_id, "O"), label_map.get(gold_id, "O"))
        for pred_id, gold_id in zip(logits, label_ids)
    ]
    y_pred = [pred for pred, _ in pairs]
    y_true = [gold for _, gold in pairs]
    assert len(y_true) == len(y_pred)
    return (y_true, y_pred)
def inference(self, examples: List[TokenClsInputExample], batch_size: int = 64):
    """
    Do inference on given examples

    Args:
        examples (List[TokenClsInputExample]): examples
        batch_size (int, optional): batch size. Defaults to 64.

    Returns:
        List(tuple): a list of tuples of tokens, tags predicted by model
    """
    data_set = self.convert_to_tensors(examples, include_labels=False)
    loader = DataLoader(data_set, sampler=SequentialSampler(data_set), batch_size=batch_size)
    model_out = self.evaluate(loader)
    # mask of real (non-padding) positions — assumes tensors[-2] is the
    # mask produced by convert_to_tensors; TODO confirm against that method
    active = data_set.tensors[-2].view(len(data_set), -1) != 0.0
    tag_ids = torch.argmax(F.log_softmax(model_out[0], dim=2), dim=2)
    per_example_ids = [
        tag_ids[i][active[i]].detach().cpu().numpy() for i in range(tag_ids.size()[0])
    ]
    return [
        (ex.tokens, [self.label_id_str.get(t, "O") for t in ids])
        for ids, ex in zip(per_example_ids, examples)
    ]
def save_model(self, output_dir: str):
    """
    Save model, labels and word vocabulary to a directory.

    Writes ``model.bin`` (whole serialized model), ``crf.bin`` when a CRF
    head is used, ``labels.txt`` (one label per line) and ``w_vocab.dat``
    (pickled word vocabulary).

    Args:
        output_dir (str): output directory; created if missing.
    """
    # exist_ok avoids the race between the exists() check and makedirs()
    os.makedirs(output_dir, exist_ok=True)
    torch.save(self.model, os.path.join(output_dir, "model.bin"))
    if self.use_crf:
        torch.save(self.crf, os.path.join(output_dir, "crf.bin"))
    with io.open(os.path.join(output_dir, "labels.txt"), "w", encoding="utf-8") as fw:
        for lbl in self.labels:
            fw.write("{}\n".format(lbl))
    with io.open(os.path.join(output_dir, "w_vocab.dat"), "wb") as fw:
        pickle.dump(self.word_vocab, fw)
@classmethod
def load_model(cls, model_path: str):
    """
    Load a tagger model from given path.

    Expects the layout written by ``save_model``: ``model.bin``,
    ``labels.txt``, ``w_vocab.dat`` and, optionally, ``crf.bin``.

    Args:
        model_path (str): model directory path

    Returns:
        NeuralTagger: tagger model loaded from path

    Raises:
        FileNotFoundError: if the directory or the model binary is missing.
    """
    # Load a trained model and vocabulary from given path
    if not os.path.exists(model_path):
        raise FileNotFoundError(model_path)
    with io.open(os.path.join(model_path, "labels.txt")) as fp:
        labels = [line.strip() for line in fp.readlines()]
    with io.open(os.path.join(model_path, "w_vocab.dat"), "rb") as fp:
        w_vocab = pickle.load(fp)
    model_file_path = os.path.join(model_path, "model.bin")
    if not os.path.exists(model_file_path):
        raise FileNotFoundError(model_file_path)
    model = torch.load(model_file_path)
    new_class = cls(model, w_vocab, labels)
    # CRF head is optional — its presence on disk decides use_crf
    crf_file_path = os.path.join(model_path, "crf.bin")
    new_class.use_crf = os.path.exists(crf_file_path)
    if new_class.use_crf:
        new_class.crf = torch.load(crf_file_path)
    return new_class
def get_logits(self, batch):
    """Run *batch* through the model in eval mode and return its last output."""
    self.model.eval()
    mapped_inputs = self.batch_mapper(batch)
    return self.model(**mapped_inputs)[-1]
class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self, input_ids, char_ids, shape_ids, mask=None, label_id=None):
        """Store the tensor-ready features for one example.

        Args:
            input_ids: word token ids.
            char_ids: per-word character ids.
            shape_ids: word-shape feature ids.
            mask: padding/attention mask (optional).
            label_id: gold label ids (optional, absent at inference).
        """
        self.input_ids, self.char_ids, self.shape_ids = input_ids, char_ids, shape_ids
        self.mask, self.label_id = mask, label_id
| 24,647 | 39.012987 | 99 | py |
nlp-architect | nlp-architect-master/nlp_architect/models/temporal_convolutional_network.py | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# pylint: disable=no-name-in-module
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.layers import Wrapper
from tensorflow.python.layers.convolutional import Conv1D
from tensorflow.python.ops import variable_scope
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.eager import context
from tensorflow.python.ops import nn_impl
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine.base_layer import InputSpec
from tensorflow.python.ops import array_ops
from tensorflow.python.framework import ops
# ***NOTE***: The WeightNorm Class is copied from this PR:
# https://github.com/tensorflow/tensorflow/issues/14070
# Once this becomes part of the official TF release, it will be removed
class WeightNorm(Wrapper):
    """This wrapper reparameterizes a layer by decoupling the weight's
    magnitude and direction. This speeds up convergence by improving the
    conditioning of the optimization problem.

    Weight Normalization: A Simple Reparameterization to Accelerate
    Training of Deep Neural Networks: https://arxiv.org/abs/1602.07868
    Tim Salimans, Diederik P. Kingma (2016)

    WeightNorm wrapper works for keras and tf layers.

    ```python
    net = WeightNorm(tf.keras.layers.Conv2D(2, 2, activation='relu'),
                     input_shape=(32, 32, 3), data_init=True)(x)
    net = WeightNorm(tf.keras.layers.Conv2D(16, 5, activation='relu'),
                     data_init=True)
    net = WeightNorm(tf.keras.layers.Dense(120, activation='relu'),
                     data_init=True)(net)
    net = WeightNorm(tf.keras.layers.Dense(n_classes),
                     data_init=True)(net)
    ```

    Arguments:
        layer: a layer instance.
        data_init: If `True` use data dependent variable initialization

    Raises:
        ValueError: If not initialized with a `Layer` instance.
        ValueError: If `Layer` does not contain a `kernel` of weights
        NotImplementedError: If `data_init` is True and running graph execution
    """

    def __init__(self, layer, data_init=False, **kwargs):
        if not isinstance(layer, Layer):
            raise ValueError(
                "Please initialize `WeightNorm` layer with a "
                "`Layer` instance. You passed: {input}".format(input=layer)
            )
        if not context.executing_eagerly() and data_init:
            raise NotImplementedError(
                "Data dependent variable initialization is not available for " "graph execution"
            )
        # Data-dependent init (eager only) is deferred to the first call().
        self.initialized = True
        if data_init:
            self.initialized = False
        self.layer_depth = None
        self.norm_axes = None
        super(WeightNorm, self).__init__(layer, **kwargs)
        self._track_trackable(layer, name="layer")

    def _compute_weights(self):
        """Generate weights by combining the direction of weight vector
        with it's norm"""
        with variable_scope.variable_scope("compute_weights"):
            # kernel = g * v / ||v||, norm taken over all but the last
            # (filter/unit) axis — see self.norm_axes set in build().
            self.layer.kernel = (
                nn_impl.l2_normalize(self.layer.v, axis=self.norm_axes) * self.layer.g
            )

    def _init_norm(self, weights):
        """Set the norm of the weight vector"""
        from tensorflow.python.ops.linalg_ops import norm

        with variable_scope.variable_scope("init_norm"):
            # pylint: disable=no-member
            flat = array_ops.reshape(weights, [-1, self.layer_depth])
            # pylint: disable=no-member
            return array_ops.reshape(norm(flat, axis=0), (self.layer_depth,))

    def _data_dep_init(self, inputs):
        """Data dependent initialization for eager execution"""
        from tensorflow.python.ops.nn import moments
        from tensorflow.python.ops.math_ops import sqrt

        with variable_scope.variable_scope("data_dep_init"):
            # Generate data dependent init values — the wrapped layer is
            # called without its activation so raw pre-activations are measured
            activation = self.layer.activation
            self.layer.activation = None
            x_init = self.layer.call(inputs)
            m_init, v_init = moments(x_init, self.norm_axes)
            scale_init = 1.0 / sqrt(v_init + 1e-10)

            # Assign data dependent init values
            self.layer.g = self.layer.g * scale_init
            self.layer.bias = -1 * m_init * scale_init
            self.layer.activation = activation
            self.initialized = True

    # pylint: disable=signature-differs
    def build(self, input_shape):
        """Build `Layer`"""
        input_shape = tensor_shape.TensorShape(input_shape).as_list()
        self.input_spec = InputSpec(shape=input_shape)

        if not self.layer.built:
            self.layer.build(input_shape)
            # temporarily mark un-built while the kernel is reparameterized
            self.layer.built = False

            if not hasattr(self.layer, "kernel"):
                raise ValueError(
                    "`WeightNorm` must wrap a layer that" " contains a `kernel` for weights"
                )

            # The kernel's filter or unit dimension is -1
            self.layer_depth = int(self.layer.kernel.shape[-1])
            self.norm_axes = list(range(self.layer.kernel.shape.ndims - 1))

            # v holds the direction, g the per-unit magnitude
            self.layer.v = self.layer.kernel
            self.layer.g = self.layer.add_variable(
                name="g",
                shape=(self.layer_depth,),
                initializer=initializers.get("ones"),
                dtype=self.layer.kernel.dtype,
                trainable=True,
            )

            # initialize g to ||v|| before the first weight computation
            with ops.control_dependencies([self.layer.g.assign(self._init_norm(self.layer.v))]):
                self._compute_weights()

            self.layer.built = True

        super(WeightNorm, self).build()
        self.built = True

    # pylint: disable=arguments-differ
    def call(self, inputs):
        """Call `Layer`"""
        if context.executing_eagerly():
            if not self.initialized:
                self._data_dep_init(inputs)
            self._compute_weights()  # Recompute weights for each forward pass

        output = self.layer.call(inputs)
        return output

    def compute_output_shape(self, input_shape):
        # delegate entirely to the wrapped layer
        return tensor_shape.TensorShape(self.layer.compute_output_shape(input_shape).as_list())
class TCN:
    """
    This class defines core TCN architecture.
    This is only the base class, training strategy is not implemented.
    """

    def __init__(self, max_len, n_features_in, hidden_sizes, kernel_size=7, dropout=0.2):
        """
        To use this class,
            1. Inherit this class
            2. Define the training losses in build_train_graph()
            3. Define the training strategy in run()
            4. After the inherited class object is initialized,
               call build_train_graph followed by run

        Args:
            max_len: Maximum length of sequence
            n_features_in: Number of input features (dimensions)
            hidden_sizes: Number of hidden sizes in each layer of TCN (same for all layers)
            kernel_size: Kernel size of convolution filter (same for all layers)
            dropout: Dropout, fraction of activations to drop
        """
        self.max_len = max_len
        self.n_features_in = n_features_in
        self.hidden_sizes = hidden_sizes
        self.kernel_size = kernel_size
        self.dropout = dropout
        self.n_hidden_layers = len(self.hidden_sizes)

        # warn if the stacked dilated convolutions cannot see the whole input
        receptive_field_len = self.calculate_receptive_field()
        if receptive_field_len < self.max_len:
            print(
                "Warning! receptive field of the TCN: "
                "%d is less than the input sequence length: %d."
                % (receptive_field_len, self.max_len)
            )
        else:
            print(
                "Receptive field of the TCN: %d, input sequence length: %d."
                % (receptive_field_len, self.max_len)
            )
        self.layer_activations = []
        # toggle this for train/inference mode
        self.training_mode = tf.placeholder(tf.bool, name="training_mode")
        self.sequence_output = None

    def calculate_receptive_field(self):
        """
        Returns:
            int: receptive field length, 1 + 2 * (kernel_size - 1) * (2^L - 1),
            reflecting two dilated convs per residual block with dilation
            doubling at each of the L layers.
        """
        return 1 + 2 * (self.kernel_size - 1) * (2 ** self.n_hidden_layers - 1)

    def build_network_graph(self, x, last_timepoint=False):
        """
        Given the input placeholder x, build the entire TCN graph

        Args:
            x: Input placeholder
            last_timepoint: Whether or not to select only the last timepoint to output

        Returns:
            output of the TCN
        """
        # loop and define multiple residual blocks
        with tf.variable_scope("tcn"):
            for i in range(self.n_hidden_layers):
                dilation_size = 2 ** i  # dilation doubles per layer
                in_channels = self.n_features_in if i == 0 else self.hidden_sizes[i - 1]
                out_channels = self.hidden_sizes[i]
                with tf.variable_scope("residual_block_" + str(i)):
                    x = self._residual_block(
                        x,
                        in_channels,
                        out_channels,
                        dilation_size,
                        (self.kernel_size - 1) * dilation_size,
                    )
                    x = tf.nn.relu(x)
                self.layer_activations.append(x)
            self.sequence_output = x

            # get outputs
            if not last_timepoint:
                prediction = self.sequence_output
            else:
                # last time point size (batch_size, hidden_sizes_encoder)
                width = self.sequence_output.shape[1].value
                lt = tf.squeeze(
                    tf.slice(self.sequence_output, [0, width - 1, 0], [-1, 1, -1]), axis=1
                )
                prediction = tf.layers.Dense(
                    1,
                    kernel_initializer=tf.initializers.random_normal(0, 0.01),
                    bias_initializer=tf.initializers.random_normal(0, 0.01),
                )(lt)
        return prediction

    def _residual_block(self, x, in_channels, out_channels, dilation, padding):
        """
        Defines the residual block

        Args:
            x: Input tensor to residual block
            in_channels: Number of input features (dimensions)
            out_channels: Number of output features (dimensions)
            dilation: Dilation rate
            padding: Padding value

        Returns:
            Output of residual path
        """
        xin = x
        # define two temporal blocks
        for i in range(2):
            with tf.variable_scope("temporal_block_" + str(i)):
                x = self._temporal_block(x, out_channels, dilation, padding)

        # sidepath: a 1x1 conv matches channel counts when they differ
        if in_channels != out_channels:
            x_side = tf.layers.Conv1D(
                filters=out_channels,
                kernel_size=1,
                padding="same",
                strides=1,
                activation=None,
                dilation_rate=1,
                kernel_initializer=tf.initializers.random_normal(0, 0.01),
                bias_initializer=tf.initializers.random_normal(0, 0.01),
            )(xin)
        else:
            x_side = xin

        # combine both
        return tf.add(x, x_side)

    def _temporal_block(self, x, out_channels, dilation, padding):
        """
        Defines the temporal block, which is a dilated causual conv layer,
        followed by relu and dropout

        Args:
            x: Input to temporal block
            out_channels: Number of conv filters
            dilation: dilation rate
            padding: padding value

        Returns:
            Tensor output of temporal block
        """
        # conv layer
        x = self._dilated_causal_conv(x, out_channels, dilation, padding)
        x = tf.nn.relu(x)

        # dropout — noise_shape drops whole channels per sample (same mask
        # across time), gated on the training_mode placeholder
        batch_size = tf.shape(x)[0]
        x = tf.layers.dropout(
            x,
            rate=self.dropout,
            noise_shape=[batch_size, 1, out_channels],
            training=self.training_mode,
        )
        return x

    # define model
    def _dilated_causal_conv(self, x, n_filters, dilation, padding):
        """
        Defines dilated causal convolution

        Args:
            x: Input activation
            n_filters: Number of convolution filters
            dilation: Dilation rate
            padding: padding value

        Returns:
            Tensor output of convolution
        """
        input_width = x.shape[1].value
        with tf.variable_scope("dilated_causal_conv"):
            # define dilated convolution layer with left side padding
            # (pad-left + "valid" conv keeps outputs causal)
            x = tf.pad(x, tf.constant([[0, 0], [padding, 0], [0, 0]]), "CONSTANT")
            x = WeightNorm(
                Conv1D(
                    filters=n_filters,
                    kernel_size=self.kernel_size,
                    padding="valid",
                    strides=1,
                    activation=None,
                    dilation_rate=dilation,
                    kernel_initializer=tf.initializers.random_normal(0, 0.01),
                    bias_initializer=tf.initializers.random_normal(0, 0.01),
                )
            )(x)
        # causal padding must preserve the temporal width exactly
        assert x.shape[1].value == input_width
        return x

    def build_train_graph(self, *args, **kwargs):
        """
        Placeholder for defining training losses and metrics
        """
        raise NotImplementedError("Error! losses for training must be defined")

    def run(self, *args, **kwargs):
        """
        Placeholder for defining training strategy
        """
        raise NotImplementedError("Error! training routine must be defined")
class CommonLayers:
    """
    Class that contains the common layers for language modeling -
    word embeddings and projection layer
    """

    def __init__(self):
        """
        Initialize class
        """
        # set by define_input_layer(); num_words / n_features_in are expected
        # to be set by the inheriting model before the layers are defined
        self.word_embeddings_tf = None
        self.num_words = None
        self.n_features_in = None

    def define_input_layer(
        self, input_placeholder_tokens, word_embeddings, embeddings_trainable=True
    ):
        """
        Define the input word embedding layer

        Args:
            input_placeholder_tokens: tf.placeholder, input to the model
            word_embeddings: numpy array (optional), to initialize the embeddings with
            embeddings_trainable: boolean, whether or not to train the embedding table

        Returns:
            Embeddings corresponding to the data in input placeholder
        """
        # embedding table kept on CPU (large lookup tables)
        with tf.device("/cpu:0"):
            with tf.variable_scope("embedding_layer", reuse=False):
                if word_embeddings is None:
                    initializer = tf.initializers.random_normal(0, 0.01)
                else:
                    initializer = tf.constant_initializer(word_embeddings)
                self.word_embeddings_tf = tf.get_variable(
                    "embedding_table",
                    shape=[self.num_words, self.n_features_in],
                    initializer=initializer,
                    trainable=embeddings_trainable,
                )
                input_embeddings = tf.nn.embedding_lookup(
                    self.word_embeddings_tf, input_placeholder_tokens
                )
        return input_embeddings

    def define_projection_layer(self, prediction, tied_weights=True):
        """
        Define the output word embedding layer

        Args:
            prediction: tf.tensor, the prediction from the model
            tied_weights: boolean, whether or not to tie weights from the input embedding layer

        Returns:
            Probability distribution over vocabulary
        """
        with tf.device("/cpu:0"):
            if tied_weights:
                # tie projection layer and embedding layer
                with tf.variable_scope("embedding_layer", reuse=tf.AUTO_REUSE):
                    # softmax weights are the transposed embedding table
                    softmax_w = tf.matrix_transpose(self.word_embeddings_tf)
                    softmax_b = tf.get_variable("softmax_b", [self.num_words])
                    # collapse (batch, length) so a single matmul projects
                    # every timestep, then restore the sequence shape
                    _, l, k = prediction.shape.as_list()
                    prediction_reshaped = tf.reshape(prediction, [-1, k])
                    mult_out = tf.nn.bias_add(tf.matmul(prediction_reshaped, softmax_w), softmax_b)
                    projection_out = tf.reshape(mult_out, [-1, l, self.num_words])
            else:
                with tf.variable_scope("projection_layer", reuse=False):
                    projection_out = tf.layers.Dense(self.num_words)(prediction)
        return projection_out
| 17,395 | 36.735358 | 99 | py |
nlp-architect | nlp-architect-master/nlp_architect/models/chunker.py | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from nlp_architect.nn.tensorflow.python.keras.layers.crf import CRF
from nlp_architect.nn.tensorflow.python.keras.utils import load_model, save_model
class SequenceTagger(object):
    """
    A sequence tagging model for POS and Chunks written in Tensorflow (and Keras) based on the
    paper 'Deep multi-task learning with low level tasks supervised at lower layers'.
    The model has 3 Bi-LSTM layers and outputs POS and Chunk tags.

    Args:
        use_cudnn (bool, optional): use GPU based model (CUDNNA cells)
    """

    def __init__(self, use_cudnn=False):
        # model hyper-parameters; all populated by build()
        self.vocabulary_size = None
        self.num_pos_labels = None
        self.num_chunk_labels = None
        self.char_vocab_size = None
        self.feature_size = None
        self.dropout = None
        self.max_word_len = None
        self.classifier = None
        self.optimizer = None
        self.model = None
        self.use_cudnn = use_cudnn

    def build(
        self,
        vocabulary_size,
        num_pos_labels,
        num_chunk_labels,
        char_vocab_size=None,
        max_word_len=25,
        feature_size=100,
        dropout=0.5,
        classifier="softmax",
        optimizer=None,
    ):
        """
        Build a chunker/POS model

        Args:
            vocabulary_size (int): the size of the input vocabulary
            num_pos_labels (int): the size of of POS labels
            num_chunk_labels (int): the sie of chunk labels
            char_vocab_size (int, optional): character vocabulary size
            max_word_len (int, optional): max characters in a word
            feature_size (int, optional): feature size - determines the embedding/LSTM layer \
                hidden state size
            dropout (float, optional): dropout rate
            classifier (str, optional): classifier layer, 'softmax' for softmax or 'crf' for \
                conditional random fields classifier. default is 'softmax'.
            optimizer (tensorflow.python.training.optimizer.Optimizer, optional): optimizer, if \
                None will use default SGD (paper setup)
        """
        self.vocabulary_size = vocabulary_size
        self.char_vocab_size = char_vocab_size
        self.num_pos_labels = num_pos_labels
        self.num_chunk_labels = num_chunk_labels
        self.max_word_len = max_word_len
        self.feature_size = feature_size
        self.dropout = dropout
        self.classifier = classifier

        word_emb_layer = tf.keras.layers.Embedding(
            self.vocabulary_size, self.feature_size, name="embedding", mask_zero=False
        )
        word_input = tf.keras.layers.Input(shape=(None,))
        word_embedding = word_emb_layer(word_input)
        input_src = word_input
        features = word_embedding

        # add char input if present
        if self.char_vocab_size is not None:
            char_input = tf.keras.layers.Input(shape=(None, self.max_word_len))
            char_emb_layer = tf.keras.layers.Embedding(
                self.char_vocab_size, 30, name="char_embedding", mask_zero=False
            )
            char_embedding = char_emb_layer(char_input)
            # per-word character CNN followed by max-pooling over characters
            char_embedding = tf.keras.layers.TimeDistributed(
                tf.keras.layers.Conv1D(30, 3, padding="same")
            )(char_embedding)
            char_embedding = tf.keras.layers.TimeDistributed(tf.keras.layers.GlobalMaxPooling1D())(
                char_embedding
            )
            input_src = [input_src, char_input]
            features = tf.keras.layers.concatenate([word_embedding, char_embedding])

        # 3 stacked Bi-LSTM layers; POS is supervised at the lowest layer,
        # chunks at the top (per the multi-task paper)
        rnn_layer_1 = tf.keras.layers.Bidirectional(self._rnn_cell(return_sequences=True))(features)
        rnn_layer_2 = tf.keras.layers.Bidirectional(self._rnn_cell(return_sequences=True))(
            rnn_layer_1
        )
        rnn_layer_3 = tf.keras.layers.Bidirectional(self._rnn_cell(return_sequences=True))(
            rnn_layer_2
        )

        # outputs
        pos_out = tf.keras.layers.Dense(
            self.num_pos_labels, activation="softmax", name="pos_output"
        )(rnn_layer_1)
        losses = {"pos_output": "categorical_crossentropy"}
        metrics = {"pos_output": "categorical_accuracy"}
        if "crf" in self.classifier:
            # the CRF layer runs on CPU
            with tf.device("/cpu:0"):
                chunk_crf = CRF(self.num_chunk_labels, name="chunk_crf")
                rnn_layer_3_dense = tf.keras.layers.Dense(self.num_chunk_labels)(
                    tf.keras.layers.Dropout(self.dropout)(rnn_layer_3)
                )
                chunks_out = chunk_crf(rnn_layer_3_dense)
                losses["chunk_crf"] = chunk_crf.loss
                metrics["chunk_crf"] = chunk_crf.viterbi_accuracy
        else:
            chunks_out = tf.keras.layers.TimeDistributed(
                tf.keras.layers.Dense(self.num_chunk_labels, activation="softmax"), name="chunk_out"
            )(rnn_layer_3)
            losses["chunk_out"] = "categorical_crossentropy"
            metrics["chunk_out"] = "categorical_accuracy"

        model = tf.keras.Model(input_src, [pos_out, chunks_out])
        if optimizer is None:
            # NOTE(review): docstring says default SGD (paper setup), but the
            # actual default is Adam with gradient clipping
            self.optimizer = tf.keras.optimizers.Adam(0.001, clipnorm=5.0)
        else:
            self.optimizer = optimizer
        model.compile(optimizer=self.optimizer, loss=losses, metrics=metrics)
        self.model = model

    def load_embedding_weights(self, weights):
        """
        Load word embedding weights into the model embedding layer

        Args:
            weights (numpy.ndarray): 2D matrix of word weights
        """
        assert self.model is not None, (
            "Cannot assign weights, apply build() before trying to " "loading embedding weights "
        )
        emb_layer = self.model.get_layer(name="embedding")
        assert emb_layer.output_dim == weights.shape[1], "embedding vectors shape mismatch"
        emb_layer.set_weights([weights])

    def _rnn_cell(self, **kwargs):
        # CuDNN cell is GPU-only but much faster; both share the same size
        if self.use_cudnn:
            rnn_cell = tf.keras.layers.CuDNNLSTM(self.feature_size, **kwargs)
        else:
            rnn_cell = tf.keras.layers.LSTM(self.feature_size, **kwargs)
        return rnn_cell

    def fit(self, x, y, batch_size=1, epochs=1, validation_data=None, callbacks=None):
        """
        Fit provided X and Y on built model

        Args:
            x: x samples
            y: y samples
            batch_size (int, optional): batch size per sample
            epochs (int, optional): number of epochs to run before ending training process
            validation_data (optional): x and y samples to validate at the end of the epoch
            callbacks (optional): additional callbacks to run with fitting
        """
        self.model.fit(
            x=x,
            y=y,
            batch_size=batch_size,
            epochs=epochs,
            validation_data=validation_data,
            callbacks=callbacks,
        )

    def predict(self, x, batch_size=1):
        """
        Predict labels given x.

        Args:
            x: samples for inference
            batch_size (int, optional): forward pass batch size

        Returns:
            tuple of numpy arrays of pos and chunk labels
        """
        return self.model.predict(x=x, batch_size=batch_size)

    def save(self, filepath):
        """
        Save the model to disk

        Args:
            filepath (str): file name to save model
        """
        # persist only the topology hyper-parameters alongside the weights;
        # model/optimizer objects are not picklable and use_cudnn is
        # machine-specific, so they are excluded
        topology = {k: v for k, v in self.__dict__.items()}
        topology.pop("model")
        topology.pop("optimizer")
        topology.pop("use_cudnn")
        save_model(self.model, topology, filepath)

    def load(self, filepath):
        """
        Load model from disk

        Args:
            filepath (str): file name of model
        """
        load_model(filepath, self)
class SequenceChunker(SequenceTagger):
    """
    A sequence Chunker model written in Tensorflow (and Keras) based SequenceTagger model.
    The model uses only the chunking output of the model.
    """

    def predict(self, x, batch_size=1):
        """
        Predict chunk labels given x, using only the chunk output head.

        Args:
            x: samples for inference
            batch_size (int, optional): forward pass batch size

        Returns:
            tuple of numpy arrays of chunk labels
        """
        # the chunk head is the last output of the multi-task model
        chunk_head = self.model.output[-1]
        submodel = tf.keras.Model(self.model.input, chunk_head)
        return submodel.predict(x=x, batch_size=batch_size)
class SequencePOSTagger(SequenceTagger):
    """
    A sequence POS tagger model written in Tensorflow (and Keras) based SequenceTagger model.
    The model uses only the chunking output of the model.
    """

    def predict(self, x, batch_size=1):
        """
        Predict POS labels given x, using only the POS output head.

        Args:
            x: samples for inference
            batch_size (int, optional): forward pass batch size

        Returns:
            tuple of numpy arrays of POS labels
        """
        # the POS head is the first output of the multi-task model
        pos_head = self.model.output[0]
        submodel = tf.keras.Model(self.model.input, pos_head)
        return submodel.predict(x=x, batch_size=batch_size)
| 9,885 | 35.88806 | 100 | py |
nlp-architect | nlp-architect-master/nlp_architect/models/most_common_word_sense.py | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ****************************************************************************
import tensorflow as tf
class MostCommonWordSense(object):
    """Small feed-forward classifier for most-common word-sense detection."""

    def __init__(self, epochs, batch_size, callback_args=None):
        """Configure the trainer.

        Args:
            epochs: number of training epochs.
            batch_size: mini-batch size for fit/evaluate.
            callback_args: optional callback configuration (stored as-is).
        """
        # SGD with Nesterov momentum, MSE loss — same setup as the original
        self.optimizer = tf.keras.optimizers.SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
        self.loss = "mean_squared_error"
        self.epochs = epochs
        self.batch_size = batch_size
        self.model = None
        self.callback_args = callback_args

    def build(self, input_dim):
        """Assemble and compile the 2-class MLP (Dense → Dropout → Dense)."""
        net = tf.keras.models.Sequential(
            [
                tf.keras.layers.Dense(100, activation="relu", input_dim=input_dim),
                tf.keras.layers.Dropout(0.5),
                tf.keras.layers.Dense(2, activation="softmax"),
            ]
        )
        net.compile(loss=self.loss, optimizer=self.optimizer)
        self.model = net

    def fit(self, train_set):
        """Train on ``train_set['X']`` / ``train_set['y']``."""
        features, targets = train_set["X"], train_set["y"]
        self.model.fit(features, targets, epochs=self.epochs, batch_size=self.batch_size)

    def save(self, save_path):
        """Serialize the whole model to *save_path*."""
        self.model.save(save_path)

    def load(self, model_path):
        """Restore a model previously written by :meth:`save`."""
        self.model = tf.keras.models.load_model(model_path)

    def eval(self, valid_set):
        """Return the model's loss on the validation set."""
        return self.model.evaluate(valid_set["X"], valid_set["y"], batch_size=self.batch_size)

    def get_outputs(self, valid_set):
        """Return raw model predictions for *valid_set*."""
        return self.model.predict(valid_set)
| 2,100 | 37.907407 | 99 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.