repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
NMTGMinor | NMTGMinor-master/onmt/modules/extensions/multihead_attn_blaslt/setup.py | import torch
from torch.utils import cpp_extension
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
from torch.utils.cpp_extension import CUDAExtension
from torch.utils.cpp_extension import BuildExtension
from torch.utils.cpp_extension import CUDA_HOME
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
    """Query ``nvcc`` under *cuda_dir* and parse its version banner.

    :param cuda_dir: CUDA toolkit root (expects ``<cuda_dir>/bin/nvcc``)
    :return: tuple ``(raw nvcc -V output, major version str, minor version str)``
    """
    raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
    output = raw_output.split()
    # The banner contains "... release <major>.<minor>, V<full>"; the token
    # after "release" looks like "11.3," (note the trailing comma).
    release_idx = output.index("release") + 1
    release = output[release_idx].split(".")
    bare_metal_major = release[0]
    # BUG FIX: the original took release[1][0] (first character only), which
    # both stripped the comma and truncated two-digit minors (11.10 -> "1").
    # Strip the comma explicitly so two-digit minors parse correctly.
    bare_metal_minor = release[1].split(",")[0]
    return raw_output, bare_metal_major, bare_metal_minor
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
    """Warn when the system CUDA toolkit differs from the one PyTorch was built with.

    Prints the ``nvcc`` banner, compares major/minor against
    ``torch.version.cuda`` and emits a warning message on mismatch.

    :param cuda_dir: CUDA toolkit root passed through to the version helper
    :return: ``(minor, major)`` of the bare-metal toolkit as ints
             (note the minor-first order, which the caller relies on)
    """
    raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
    binary_version = torch.version.cuda.split(".")
    torch_binary_major = binary_version[0]
    torch_binary_minor = binary_version[1]

    print("\nCompiling cuda extensions with")
    print(raw_output + "from " + cuda_dir + "/bin\n")

    mismatch = (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor)
    if mismatch:
        print("Cuda extensions are being compiled with a version of Cuda that does " +
              "not match the version used to compile Pytorch binaries. " +
              "Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda) +
              "In some cases, a minor-version mismatch will not cause later errors: " +
              "https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
              "You can try commenting out this check (at your own risk).")

    return int(bare_metal_minor), int(bare_metal_major)
# Check, if ATen/CUDAGenerator.h is found, otherwise use the new
# ATen/CUDAGeneratorImpl.h, due to breaking change in https://github.com/pytorch/pytorch/pull/36026
# NOTE(review): the flag is set when CUDAGeneratorImpl.h *is* present —
# presumably -DOLD_GENERATOR_PATH selects the post-#36026 include path in the
# C++ sources; confirm against the .cpp/.cu files.
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
    generator_flag = ["-DOLD_GENERATOR_PATH"]
def get_cuda_bare_metal_version(cuda_dir):
    """Return the raw ``nvcc -V`` banner plus major/minor version strings.

    NOTE(review): this re-definition shadows the identical helper defined
    earlier in this file; it is kept so existing call sites keep working.
    """
    banner = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
    tokens = banner.split()
    version_token = tokens[tokens.index("release") + 1]  # e.g. "11.3,"
    major, _, rest = version_token.partition(".")
    minor = rest[0]  # first digit only; also drops the trailing comma
    return banner, major, minor
# --- build configuration -----------------------------------------------------
cmdclass = {}
ext_modules = []
# Build with plain setuptools instead of ninja (see comment at top of file).
cmdclass['build_ext'] = BuildExtension.with_options(use_ninja=False)
cc_flag = []
_, bare_metal_major, _ = get_cuda_bare_metal_version(cpp_extension.CUDA_HOME)
# Target Turing (sm_75) and Ampere (sm_80 / sm_86).
# NOTE(review): cc_flag is assembled here but never added to
# extra_compile_args below — confirm whether it should be passed to nvcc.
cc_flag.append('-gencode')
cc_flag.append('arch=compute_75,code=sm_75')
cc_flag.append('-gencode')
cc_flag.append('arch=compute_80,code=sm_80')
cc_flag.append('-gencode')
cc_flag.append('arch=compute_86,code=sm_86')
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
# Macros consumed by the C++ sources to handle API changes across torch versions.
version_ge_1_1 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
    version_ge_1_1 = ['-DVERSION_GE_1_1']
version_ge_1_3 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
    version_ge_1_3 = ['-DVERSION_GE_1_3']
version_ge_1_5 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
    version_ge_1_5 = ['-DVERSION_GE_1_5']
version_dependent_macros = version_ge_1_1 + version_ge_1_3 + version_ge_1_5
# subprocess.run(["git", "submodule", "update", "--init", "cutlass"])
# subprocess.run(["git", "clone", "https://github.com/NVIDIA/cutlass.git", "multihead_attn/cutlass"])
# subprocess.run(["git", "-C", "cutlass", "checkout", "ed2ed4d667ce95e1371bd62db32b6a114e774336"])
# subprocess.run(["git", "-C", "cutlass", "checkout", "fe3438a3c1ccbdd03dc1aca3bb68099a9e2a58bd"])
# Warn (but do not abort) if the system toolkit mismatches the torch build.
bare_metal_minor, bare_metal_major = check_cuda_torch_binary_vs_bare_metal(CUDA_HOME)
print("GENERATOR FLAG:", generator_flag)
# Register the fused self-attention CUDA extension.
ext_modules.append(
    CUDAExtension(name='self_multihead_attn_blaslt',
                  sources=['self_multihead_attn.cpp',
                           'self_multihead_attn_cuda.cu'],
                  # include_dirs=[os.path.join(this_dir, 'multihead_attn/cutlass')],
                  extra_compile_args={'cxx': ['-O3', ] + version_dependent_macros + generator_flag,
                                      'nvcc': ['-O3',
                                               # expose half-precision operators/conversions
                                               '-U__CUDA_NO_HALF_OPERATORS__',
                                               '-U__CUDA_NO_HALF_CONVERSIONS__',
                                               '--expt-relaxed-constexpr',
                                               '--expt-extended-lambda',
                                               '--use_fast_math'] + version_dependent_macros +
                                              generator_flag}))
# Build/install entry point. Fix: removed a stray "\" line continuation after
# the version argument — it is redundant (and confusing) inside parentheses.
setup(
    name='test-attn-blaslt',
    version='0.1',
    description='CUDA/C++ Pytorch extension for multi-head attention ported from NVIDIA apex',
    ext_modules=ext_modules,
    cmdclass=cmdclass,
)
| 5,264 | 39.19084 | 101 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/extensions/mlp/mlp_gelu_dropoutadd.py | from copy import copy, deepcopy
import math
import torch
from torch import nn
import torch.nn.functional as F
import unittest
from time import time
import numpy as np
import random
import silu_cuda
try:
import apex.amp as amp
from apex.amp import half_function
except (ModuleNotFoundError, ImportError) as e:
amp = None
from ..optimized.compat import half_function
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from ..optimized.compat import custom_fwd, custom_bwd
try:
import fused_mlp_relu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_relu = None
try:
import fused_mlp_agelu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_agelu = None
try:
import fused_mlp_gelu_dropout_add
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_gelu_dropout_add = None
#
# class MlpReluFunction(torch.autograd.Function):
# @staticmethod
# @custom_fwd(cast_inputs=torch.float16)
# def forward(ctx, activation, *args):
# output = fused_mlp.forward(args)
# ctx.save_for_backward(*args)
# ctx.outputs = output
# return output[0]
#
# @staticmethod
# @custom_bwd
# def backward(ctx, grad_o):
# grads = fused_mlp.backward(grad_o, ctx.outputs, ctx.saved_tensors)
# del ctx.outputs
# return (None, *grads)
#
#
class MlpGeLUDropoutAddFunction(torch.autograd.Function):
    """Fused MLP: (linear -> GeLU -> dropout)* -> linear -> dropout -> residual add.

    Thin autograd wrapper around the ``fused_mlp_gelu_dropout_add`` CUDA
    extension. ``args`` is the input tensor followed by the layer weights
    and biases (see MLP.forward below for the call convention).
    """

    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx, p, r_p, *args):
        """
        :param p: hidden-layer dropout probability
        :param r_p: residual (output) dropout probability
        :param args: input tensor, then weights, then biases
        :return: (output, dropout_mask, residual_mask)
        """
        outputs = fused_mlp_gelu_dropout_add.forward(p, r_p, args)
        ctx.save_for_backward(*args)
        ctx.outputs = outputs
        # The kernel returns the two dropout masks as its last two outputs.
        dropout_mask = outputs[-2]
        residual_mask = outputs[-1]
        ctx.p = p
        # BUG FIX: the residual dropout probability was stored as ``p``
        # instead of ``r_p``, so backward rescaled the residual branch with
        # the wrong keep-probability whenever the two rates differ.
        ctx.r_p = r_p
        return outputs[0], dropout_mask, residual_mask

    @staticmethod
    @custom_bwd
    def backward(ctx, *grad_o):
        p = ctx.p
        r_p = ctx.r_p
        grads = fused_mlp_gelu_dropout_add.backward(p, r_p, grad_o[0], ctx.outputs, ctx.saved_tensors)
        del ctx.outputs
        # No gradients for the two dropout probabilities.
        return (None, None, *grads)
# Expose the fused op only when its CUDA extension imported successfully;
# callers must check for None before use.
if fused_mlp_gelu_dropout_add:
    mlp_gelu_dropout_add_function = half_function(MlpGeLUDropoutAddFunction.apply)
else:
    mlp_gelu_dropout_add_function = None
class SwishFunction(torch.autograd.Function):
    """Autograd wrapper around the fused CUDA SiLU (a.k.a. Swish) kernels."""

    @staticmethod
    @custom_fwd
    def forward(ctx, inp):
        # Keep the input around: SiLU's derivative is expressed in terms of it.
        ctx.save_for_backward(inp)
        return silu_cuda.forward(inp)

    @staticmethod
    @custom_bwd
    def backward(ctx, grad_out):
        (inp,) = ctx.saved_tensors
        if not ctx.needs_input_grad[0]:
            # Skip the kernel launch entirely when no gradient is requested.
            return (None,)
        return silu_cuda.backward(inp, grad_out)
def fast_silu(input):
    """Functional entry point for the fused CUDA SiLU activation."""
    result = SwishFunction.apply(input)
    return result
class FastSiLU(torch.nn.Module):
    """``nn.Module`` wrapper around :func:`fast_silu`."""

    def forward(self, input):
        return fast_silu(input)
class AGELUFunction(torch.autograd.Function):
    """Tanh-approximation GELU with a hand-written backward pass.

    forward:  0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    """

    @staticmethod
    @custom_fwd
    def forward(ctx, x):
        ctx.save_for_backward(x)
        sqrt_m2_pi = 0.7978845608  # sqrt(2 / pi)
        coeff = 0.044715
        tanh_arg = sqrt_m2_pi * (x + coeff * x.pow(3))
        return 0.5 * x * (1.0 + torch.tanh(tanh_arg))

    @staticmethod
    @custom_bwd
    def backward(ctx, grad_out):
        (x,) = ctx.saved_tensors
        sqrt_m2_pi = 0.7978845608
        coeff = 0.044715
        backcoeff = 0.1070322243  # 3 * coeff * sqrt(2 / pi)
        tanh_out = torch.tanh(sqrt_m2_pi * (x + coeff * x.pow(3)))
        sech_sq = 1.0 - tanh_out.pow(2)
        # d/dx of the approximation: product rule over the two factors.
        local_grad = 0.5 * x * sech_sq * (sqrt_m2_pi + backcoeff * x.pow(2)) + 0.5 * (
            1.0 + tanh_out)
        return grad_out * local_grad
if __name__ == '__main__':
class MLP(torch.nn.Module):
    """Launch MLP in C++

    Args:
        mlp_sizes (list of int): MLP sizes. Example: [1024,1024,1024] will create 2 MLP layers with shape 1024x1024
        activation (str): one of 'relu', 'sigmoid' or 'gelu' (default 'gelu')
        dropout (float): dropout probability between hidden layers
        res_dropout (float): dropout probability on the residual branch
    """

    def __init__(self, mlp_sizes, activation='gelu', dropout=0.0, res_dropout=0.0):
        super(MLP, self).__init__()
        self.num_layers = len(mlp_sizes) - 1
        self.mlp_sizes = copy(mlp_sizes)
        self.dropout = dropout
        self.res_dropout = res_dropout

        # BUG FIX: the original compared strings with ``is`` (identity), which
        # relies on CPython string interning and raises a SyntaxWarning on
        # modern Python. Use equality. Also fixed the error message, which
        # claimed "relu or none" while the accepted values are below.
        if activation == 'relu':
            self.activation = 1
        elif activation == 'sigmoid':
            self.activation = 2
        elif activation == 'gelu':
            self.activation = 3
        else:
            raise TypeError("activation must be relu, sigmoid or gelu.")

        # Per-layer parameters, also registered as attributes weight_i / bias_i.
        self.weights = []
        self.biases = []
        for i in range(self.num_layers):
            w = torch.nn.Parameter(torch.empty(mlp_sizes[i + 1], mlp_sizes[i]))
            self.weights.append(w)
            name = 'weight_{}'.format(i)
            setattr(self, name, w)
            b = torch.nn.Parameter(torch.empty(mlp_sizes[i + 1]))
            self.biases.append(b)
            name = 'bias_{}'.format(i)
            setattr(self, name, b)
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-style normal init for weights; scaled normal for biases."""
        for weight in self.weights:
            dimsum = weight.size(0) + weight.size(1)
            std = math.sqrt(2. / float(dimsum))
            nn.init.normal_(weight, 0., std)
        for bias in self.biases:
            std = math.sqrt(1. / float(bias.size(0)))
            nn.init.normal_(bias, 0., std)

    def forward(self, input, mask=None, residual_mask=None, ref=False):
        """Run the fused CUDA MLP, or the pure-PyTorch reference when ref=True."""
        if ref:
            return self.forward_ref(input, mask, residual_mask)
        # return mlp_relu_function(self.dropout, input, *self.weights, *self.biases)
        # return mlp_agelu_function(self.dropout, input, *self.weights, *self.biases)
        return mlp_gelu_dropout_add_function(self.dropout, self.res_dropout, input, *self.weights, *self.biases)

    def forward_ref(self, input, mask, res_mask):
        """Reference path replaying the fused kernel's dropout masks.

        ``mask`` is the flat dropout mask produced by the fused op;
        ``res_mask`` the residual-branch mask.
        """
        i = 0
        output = input
        for l in range(self.num_layers):
            output = F.linear(output, self.weights[l], self.biases[l])
            dropout_mask = mask[i:i + output.numel()]
            pinv = 1 / (1 - self.dropout)  # inverted-dropout rescale factor
            if l < self.num_layers - 1:
                if self.dropout > 0:
                    output = F.gelu(output) * dropout_mask.view(output.size(0), -1) * pinv
                else:
                    output = F.gelu(output)
            i += output.numel()
        pinv = 1 / (1 - self.res_dropout)
        output = output * res_mask.view(output.size(0), -1) * pinv + input if self.res_dropout > 0 else output + input
        return output

    def extra_repr(self):
        # TODO add dropout probability
        s = F"MLP sizes: {self.mlp_sizes}, activation={self.activation}"
        return s
# Benchmark/test configuration (only used when this file runs as a script).
batch_size = 24568
mlp_sizes = [1024, 4096, 1024]
# mlp_sizes = [4, 7, 4]
num_iters = 20
class TestMLP(unittest.TestCase):
    """CUDA-only correctness and timing checks for the fused MLP."""

    def test_creation(self):
        # Construction alone must succeed (no GPU work yet).
        MLP(mlp_sizes)

    def test_numeric(self):
        # Fused kernel vs pure-PyTorch reference: forward values and gradients.
        mlp = MLP(mlp_sizes, activation='gelu', dropout=0.0, res_dropout=0.0).cuda()
        print(mlp)
        ref_mlp = deepcopy(mlp)
        for _ in range(8):
            # Random batch size kept a multiple of 8 (tensor-core friendly).
            bsz = random.randint(2850, batch_size // 8) * 8
            test_input = torch.empty(bsz, mlp_sizes[0], device="cuda").uniform_(-1., 1.).requires_grad_()
            ref_input = test_input.clone().detach().requires_grad_()
            mlp_out, dropout_mask, residual_mask = mlp(test_input)
            # Feed the masks produced by the fused kernel into the reference path.
            ref_out = ref_mlp.forward(ref_input, dropout_mask, residual_mask, ref=True)
            print(dropout_mask.sum() / dropout_mask.numel())
            np.testing.assert_allclose(
                mlp_out.detach().cpu().numpy(),
                ref_out.detach().cpu().numpy(),
                atol=1e-5, rtol=1e-4)
            # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
            mlp_out.mean().mul(10.).backward()
            ref_out.mean().mul(10.).backward()
            np.testing.assert_allclose(
                test_input.grad.detach().cpu().numpy(),
                ref_input.grad.detach().cpu().numpy(),
                atol=1e-7, rtol=1e-5)
            np.testing.assert_allclose(
                mlp.biases[0].grad.detach().cpu().numpy(),
                ref_mlp.biases[0].grad.detach().cpu().numpy(),
                atol=1e-7, rtol=1e-5)

    def test_no_grad(self):
        # Forward-only comparison (no backward pass).
        mlp = MLP(mlp_sizes).cuda()
        ref_mlp = deepcopy(mlp)
        test_input = torch.empty(batch_size, mlp_sizes[0], device="cuda").uniform_(-1., 1.)
        ref_input = test_input.clone().detach()
        mlp_out, dropout_mask, residual_mask = mlp(test_input)
        ref_out = ref_mlp(ref_input, dropout_mask, residual_mask, ref=True)
        np.testing.assert_allclose(
            mlp_out.detach().cpu().numpy(),
            ref_out.detach().cpu().numpy(),
            atol=1e-7, rtol=1e-5)

    def test_performance_half(self):
        # Wall-clock: fused C++ MLP vs an equivalent nn.Sequential, fp16.
        mlp = MLP(mlp_sizes).cuda().half()
        mlp_layers = []
        for i in range(mlp.num_layers):
            linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1])
            # Copy the reference layer's parameters into the fused module so
            # both models compute the same function.
            mlp.weights[i].data.copy_(linear.weight)
            mlp.biases[i].data.copy_(linear.bias)
            mlp_layers.append(linear)
            if i < mlp.num_layers - 1:
                # mlp_layers.append(nn.ReLU(inplace=True))
                mlp_layers.append(torch.nn.GELU())
                mlp_layers.append(nn.Dropout(0.0))
            else:
                mlp_layers.append(nn.Dropout(0.0))
        ref_mlp = nn.Sequential(*mlp_layers).cuda().half()
        test_input = torch.empty(
            batch_size, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
        ref_input = torch.empty(
            batch_size, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
        # Warm up GPU
        for _ in range(100):
            ref_out = ref_mlp(ref_input) + ref_input
            ref_loss = ref_out.mean()
            ref_mlp.zero_grad()
            ref_loss.backward()
            mlp_out, _, _ = mlp(test_input)
            test_loss = mlp_out.mean()
            mlp.zero_grad()
            test_loss.backward()
        torch.cuda.profiler.start()
        torch.cuda.synchronize()
        start_time = time()
        for _ in range(num_iters):
            ref_out = ref_mlp(ref_input) + ref_input
            ref_loss = ref_out.mean()
            ref_mlp.zero_grad()
            ref_loss.backward()
        torch.cuda.synchronize()
        stop_time = time()
        print(F"\nPytorch MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
        torch.cuda.synchronize()
        start_time = time()
        for _ in range(num_iters):
            mlp_out, _, _ = mlp(test_input)
            test_loss = mlp_out.mean()
            mlp.zero_grad()
            test_loss.backward()
        torch.cuda.synchronize()
        stop_time = time()
        print(F"C++ MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
        torch.cuda.profiler.stop()
# Discover and run the TestMLP cases above (requires a CUDA device).
unittest.main()
# test = TestMLP()
# test.test_creation()
# test.test_performance_half()
# test.test_with_bias()
| 11,654 | 32.491379 | 122 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/batch_ensemble/be_relative_attention.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
import math
class BatchEnsembleMM(object):
    """Batch-ensemble linear transform: ``y = ((x * r) W^T) * s + b``.

    Rank-1 BatchEnsemble factorisation: a shared weight matrix ``W`` is
    modulated per example by fast weights ``ensemble_r`` (input scaling)
    and ``ensemble_s`` (output scaling). Forward returns intermediates so
    backward can reuse them.
    """

    @staticmethod
    def forward(x, weight, bias, ensemble_r, ensemble_s):
        """
        :param x: [T x B x H]
        :param weight: [H_out x H]
        :param bias: [H_out]
        :param ensemble_r: [B x H]
        :param ensemble_s: [B x H_out]
        :return: (output [T x B x H_out], pre-scale GEMM result, scaled input)
        """
        bsz, len_x, hin = x.size(1), x.size(0), x.size(2)
        hout = weight.size(0)
        assert bsz == ensemble_s.size(0)
        # element-wise [T x B x H] \times [B x H] (broadcast over T)
        x_r = torch.mul(x, ensemble_r)
        # GEMM No Bias. Otherwise use addmm
        x_mm = torch.mm(x_r.view(-1, hin), weight.transpose(0, 1))
        x_mm = x_mm.view(len_x, bsz, hout)
        # element-wise [T x B x Hout] \times [B x Hout]
        x_s = torch.mul(x_mm, ensemble_s)
        # add bias
        x_s = torch.add(x_s, bias)
        # intermediate results are returned for reuse in the backward pass
        return x_s, x_mm, x_r

    @staticmethod
    def backward(grad_y, x, x_r, x_mm, weight, ensemble_r, ensemble_s, need_grad_x=True):
        """Manual backward for :meth:`forward`.

        :param grad_y: gradient w.r.t. the output, [T x B x H_out]
        :return: (grad_x, grad_weight, grad_bias, grad_ensemble_r, grad_ensemble_s)
        """
        bsz, len_x, hin = x.size(1), x.size(0), x.size(2)
        hout = x_mm.size(-1)
        # BUG FIX: the bias is broadcast over both the time and batch
        # dimensions in forward, so its gradient must be reduced over them.
        # The original returned the unreduced [T x B x H_out] tensor, whose
        # shape does not match the [H_out] bias parameter.
        grad_bias = torch.sum(grad_y, dim=[0, 1])
        grad_s = grad_y
        # backprop through the last element-wise multiplication
        grad_ensemble_s = torch.mul(grad_s, x_mm)
        grad_ensemble_s = torch.sum(grad_ensemble_s, dim=0)  # reduce over T
        # backprop through the MM
        grad_mm = torch.mul(grad_s, ensemble_s)
        grad_mm = grad_mm.view(-1, hout)
        grad_r = torch.mm(grad_mm, weight).view(len_x, bsz, hin)
        # GEMM: [hout x bsz] \times [bsz x hin]
        grad_weight = torch.mm(grad_mm.transpose(0, 1), x_r.view(-1, hin))
        # back prop through the first element-wise multiplication
        # element-wise [len_x, bsz, hin] \cdot [bsz, hin]
        if need_grad_x:
            grad_x = torch.mul(grad_r, ensemble_r)
        else:
            grad_x = None
        # grad ensemble r
        grad_ensemble_r = torch.mul(grad_r, x)
        grad_ensemble_r = torch.sum(grad_ensemble_r, dim=0)  # reduce over T
        return grad_x, grad_weight, grad_bias, grad_ensemble_r, grad_ensemble_s


# short alias used by the attention function below
mm = BatchEnsembleMM
class RelativeShiftFunction(torch.autograd.Function):
    """Autograd wrapper around :class:`RelativeShift`."""

    @staticmethod
    def forward(ctx, x, batch_first, emb_last):
        assert len(x.shape) == 3, "Input must have 3 dimensions B x len_q x len_r!"
        # Stash the layout flags for backward (they are plain bools).
        ctx.batch_first = batch_first
        ctx.emb_last = emb_last
        return RelativeShift.forward(x, batch_first, emb_last)

    @staticmethod
    def backward(ctx, grad_x):
        batch_first = ctx.batch_first
        emb_last = ctx.emb_last
        # BUG FIX: autograd requires the gradient slot of every non-tensor
        # forward input to be None; the original returned False, which is
        # rejected ("expected Tensor or None") when backward runs.
        return RelativeShift.backward(grad_x, batch_first, emb_last), None, None
class RelativeShift(object):
    """Transformer-XL style shift trick for relative-position scores.

    Aligns the [B x len_q x len_r] position-score matrix so that row ``i``
    reads scores at relative offsets appropriate for query position ``i``.
    """

    @staticmethod
    def forward(x, batch_first, emb_last):
        assert len(x.shape) == 3, "Input must have 3 dimensions B x len_q x len_r or len_q x len_r x demb!"
        assert (batch_first or emb_last) and not(batch_first and emb_last), \
            "Batch first and Embedding last must be mutually exclusive"
        if not batch_first:
            raise NotImplementedError
        n_batch, n_query, n_rel = x.size(0), x.size(1), x.size(2)
        # Pad one zero column, reinterpret the memory as [B x len_r+1 x len_q],
        # then drop the first row: this realises the diagonal shift.
        pad_col = torch.zeros((n_batch, n_query, 1), device=x.device, dtype=x.dtype)
        padded = torch.cat([pad_col, x], dim=2)
        reinterpreted = padded.view(n_batch, n_rel + 1, n_query)
        return reinterpreted[:, 1:].view_as(x)

    @staticmethod
    def backward(grad_x, batch_first, emb_last):
        if not batch_first:
            raise NotImplementedError
        n_batch = grad_x.size(0)
        len_q, len_r = grad_x.size(1), grad_x.size(2)
        # Undo the shift: reinterpret, re-insert the zero row that forward's
        # padding produced, reinterpret back, then drop the padded column.
        as_cols = grad_x.view(n_batch, len_r, len_q)
        pad_row = torch.zeros((n_batch, 1, len_q), device=grad_x.device, dtype=grad_x.dtype)
        padded = torch.cat([pad_row, as_cols], dim=1)
        restored = padded.view(n_batch, len_q, len_r + 1)
        return restored[:, :, 1:]
class RelativeSelfAttnFunc(torch.autograd.Function):
    """Self-attention with relative position encodings (Transformer-XL style)
    plus BatchEnsemble rank-1 fast weights (r_i/s_i, r_p/s_p) on the input and
    position projections. Forward/backward are written manually for speed.
    """

    @staticmethod
    def forward(ctx, inputs, pos, use_time_mask, is_training, heads, ensemble,
                input_weights, output_weights, pos_weights,
                input_biases, output_biases, pos_biases,
                r_i, s_i, r_p, s_p,
                r_w_bias, r_r_bias,
                mask, dropout_prob,
                incremental, incremental_cache,
                double_precision):
        """
        :param double_precision: ops at float64, only for debugging
        :param ctx: context object to stash information for backward
        :param inputs: input hidden states [len_q x batch_size x hidden]
        :param pos: [len_k x 1 x hidden]
        :param use_time_mask: bool, if we use the causal mask for decoder
        :param is_training: training state, for dropout
        :param heads: number of heads
        :param ensemble: number of BatchEnsemble members
        :param input_weights: weight matrix [3*hidden x hidden]
        :param output_weights: output weight [hidden x hidden]
        :param input_biases: bias [3*hidden]
        :param output_biases: output bias [hidden]
        :param pos_biases: bias of the position projection
        :param pos_weights: weight of the position projection
        :param r_w_bias: content bias (u in Transformer-XL), [heads x head_dim]
        :param r_r_bias: position bias (v in Transformer-XL), [heads x head_dim]
        :param mask: None or [B x T] or [T x T]
        :param dropout_prob: attention dropout probability
        :param incremental: incremental decoding flag
        :param incremental_cache: dict caching keys/values across decode steps
        :return: (outputs, attention coverage)
        """
        heads_t = torch.tensor([heads])
        dropout_prob_t = torch.tensor([dropout_prob])
        null_tensor = torch.tensor([])
        head_dim = inputs.size(2) // heads
        scale_t = torch.tensor([head_dim ** -0.5])

        bsz, len_q = inputs.size(1), inputs.size(0)
        len_r = pos.size(0)  # r can be longer than query, i.e for bidirectional attention we need 2k+1 positions
        len_k = len_q  # because of self-attention

        if not is_training:
            # at inference the ensemble members are averaged below, so the
            # effective batch is bsz / ensemble
            bsz = bsz // ensemble

        if pos.size(1) == 1:
            pos = pos.repeat(1, bsz, 1)  # to T x B x H

        # # Input Linear GEMM
        # # input1: (activations) [len_q, bsz, hidden]
        # # input2: (weights) [hidden*3 (3072), hidden (1024)] (transpose [0,1])
        # # output: [len_q, bsz, hidden*3]
        # # GEMM: ( (len_q*bsz) x embed_dim ) x ( embed_dim x embed_dim*3 ) = (len_q*bsz x embed_dim*3)
        qkv, qkv_mm, qkv_r = mm.forward(inputs, input_weights, input_biases, r_i, s_i)

        if not is_training:
            # average the ensemble members' projections
            qkv = qkv.view(len_q, ensemble, bsz, qkv.size(-1))
            qkv = torch.mean(qkv, dim=1)

        rpos, rpos_mm, rpos_r = mm.forward(pos, pos_weights, pos_biases, r_p, s_p)

        if not is_training:
            rpos = rpos.view(len_r, ensemble, bsz, rpos.size(-1))
            rpos = torch.mean(rpos, dim=1)

        # Slice out q,k,v from one big Input Linear outuput (should only impact meta data, no copies!)
        # Sequences and heads are combined to make the batch of the Batched GEMM
        # input_lin_results: [len_q, bsz, heads(16), 3, head_dim(64)]
        # input_lin_results: [len_q, batches=bsz*heads, 3, head_dim]
        qkv = qkv.view(len_q, bsz * heads, 3, head_dim)
        queries = qkv[:, :, 0, :]
        keys = qkv[:, :, 1, :]
        values = qkv[:, :, 2, :]
        r_head_k = rpos.view(pos.size(0), bsz * heads, head_dim)  # T x BxH x D

        if incremental:
            # We have to change the heads x head_dim first and then concat to the T dim
            # bsz is changed during translation due to beam search
            # during translation we want to keep the actual T dim in MM as 1 constantly
            keys = keys.contiguous().view(len_q, bsz, heads * head_dim)
            values = values.contiguous().view(len_q, bsz, heads * head_dim)
            if 'k' in incremental_cache and 'v' in incremental_cache:
                keys = torch.cat([incremental_cache['k'], keys], dim=0)  # time first
                incremental_cache['k'] = keys
                values = torch.cat([incremental_cache['v'], values], dim=0)  # time first
                incremental_cache['v'] = values
            else:
                incremental_cache['k'] = keys
                incremental_cache['v'] = values
            keys = keys.view(-1, bsz * heads, head_dim)
            values = values.view(-1, bsz * heads, head_dim)

        # Relative Attention from here:
        # r_w_bias size: head * head_dim
        rw_head_q = queries.view(len_q, bsz, heads, head_dim) + r_w_bias
        rw_head_q = rw_head_q.view(len_q, bsz * heads, head_dim)

        # matmul1 batched GEMMs (the "AC" content term)
        # queries+bias: [len_q, bsz*heads, head_dim] transpose(0, 1)
        # keys: [len_k, bsz*heads, head_dim] transpose(0, 1)
        if queries.is_cuda:
            matmul_ac = torch.empty((bsz * heads, queries.size(0), keys.size(0)), dtype=queries.dtype,
                                    device=rw_head_q.device)
            matmul_ac = torch.baddbmm(matmul_ac, rw_head_q.transpose(0, 1), keys.transpose(0, 1).transpose(1, 2),
                                      out=matmul_ac, beta=0.0, alpha=scale_t[0])
        else:
            matmul_ac = torch.bmm(rw_head_q.transpose(0, 1), keys.transpose(0, 1).transpose(1, 2)).mul_(scale_t[0])

        rr_head_q = queries.view(len_q, bsz, heads, head_dim) + r_r_bias
        rr_head_q = rr_head_q.view(len_q, bsz * heads, head_dim)

        # matmul2 batched GEMMs (the "BD" position term)
        # queries+bias: [len_q, bsz*heads, head_dim] transpose(0, 1)
        # rel_positions: [len_r, bsz*heads, head_dim] transpose(0, 1)
        if queries.is_cuda:
            matmul_bd = torch.empty((bsz * heads, queries.size(0), len_r), dtype=queries.dtype,
                                    device=rw_head_q.device)
            matmul_bd = torch.baddbmm(matmul_bd, rr_head_q.transpose(0, 1), r_head_k.transpose(0, 1).transpose(1, 2),
                                      out=matmul_bd, beta=0.0, alpha=scale_t[0])
        else:
            matmul_bd = torch.matmul(rr_head_q.transpose(0, 1), r_head_k.transpose(0, 1).transpose(1, 2)) \
                .mul_(scale_t[0])

        # shift so that the relative positions are aligned
        # the first element will have 0 -1 ... -n relative positions compared to other elements
        # the last element will have n-1 n-2 ... 0
        matmul_bd = RelativeShift.forward(matmul_bd, True, False)

        # if len_r is longer than len_k, then we need to take the first len_k positions only
        matmul_bd = matmul_bd[:, :, :len_k]

        attn_score = matmul_ac + matmul_bd  # both AC and BD are scaled with scale_t before in baddbmm
        # attn_score should have size [bsz*heads, len_q, len_k] for now

        if mask is not None:
            # Self Attention Time Mask
            if use_time_mask:
                assert (len(mask.size()) == 2), "Timing mask is not 2D!"
                # assert (mask.size(0) == mask.size(1)), "Sequence length should match!"
                mask = mask.to(torch.bool)
                attn_score = attn_score.masked_fill_(mask, float('-inf'))
            # Key Padding Mask
            else:
                batches, len_q, seql_k = attn_score.size()
                bsz = int(batches / heads)
                attn_score = attn_score.view(bsz, heads, len_q, seql_k)
                mask = mask.to(torch.bool)
                attn_score = attn_score.masked_fill_(mask.unsqueeze(1).unsqueeze(2), float('-inf'))
                attn_score = attn_score.view(bsz * heads, len_q, seql_k)

        # softmax in fp32/fp64 for numerical stability, then cast back
        dtype_ = torch.float64 if double_precision else torch.float32
        softmax_results = F.softmax(attn_score, dim=-1, dtype=dtype_).type_as(attn_score)

        # Dropout - is not executed for inference
        if is_training:
            dropout_results, dropout_mask = torch._fused_dropout(softmax_results, p=(1. - dropout_prob_t[0]))
        else:
            dropout_results = softmax_results
            dropout_mask = null_tensor

        # Matmul2 Batched GEMMs
        # The output tensor specification is needed here to specify the non-standard output.
        # Given that pytorch cannot currently perform autograd with an output tensor specified,
        # this requires a backward pass specified.
        # Input1: from_softmax [bsz*heads, len_q, seql_k]
        # Input2: (values) [seql_v, bsz*heads, head_dim] transpose(0,1)
        # Output: [len_q, bsz*heads, head_dim] transpose(0,1)
        # GEMM: Per batch: ( len_q x seql_k ) x ( seql_k x head_dim ) = (len_q x head_dim)
        matmul2_results = torch.empty((dropout_results.size(1), dropout_results.size(0), values.size(2)),
                                      dtype=dropout_results.dtype, device=queries.device).transpose(1, 0)
        torch.bmm(dropout_results, values.transpose(0, 1), out=matmul2_results)
        matmul2_results = matmul2_results.transpose(0, 1).contiguous().view(len_q, bsz, inputs.size(2))

        # Output Linear GEMM
        # Input1: (activations) [len_q, bsz, embed_dim=heads*head_dim]
        # Input2: (weights) [ embed_dim, embed_dim ] transpose(0,1)
        # Output: [ len_q, bsz, embed_dim ]
        # GEMM: ( len_q*bsz x embed_dim ) x ( embed_dim x embed_dim ) = ( len_q*bsz x embed_dim )
        # outputs = torch.addmm(output_biases,
        #                       matmul2_results.view(inputs.size(0) * inputs.size(1), inputs.size(2)),
        #                       output_weights.transpose(0, 1),
        #                       beta=1., alpha=1.)
        #
        # outputs = outputs.view(inputs.size(0), inputs.size(1), output_weights.size(0))
        o_input = matmul2_results
        # o, o_mm, o_r = mm.forward(o_input, output_weights, output_biases, r_o, s_o)
        # outputs = o
        outputs = torch.addmm(output_biases,
                              matmul2_results.view(len_q * bsz, inputs.size(2)),
                              output_weights.transpose(0, 1),
                              beta=1., alpha=1.)
        outputs = outputs.view(len_q, bsz, output_weights.size(0))

        ctx.save_for_backward(heads_t,
                              scale_t,
                              matmul2_results,
                              dropout_results,
                              softmax_results,
                              qkv, qkv_mm, qkv_r,
                              rpos_r, rpos_mm,
                              rw_head_q, rr_head_q,
                              inputs, pos, r_head_k,
                              input_weights, pos_weights, output_weights,
                              r_i, s_i, r_p, s_p,
                              r_w_bias, r_r_bias,
                              dropout_mask,
                              dropout_prob_t)

        # with torch.no_grad():
        #     coverage = softmax_results.new(*softmax_results.size()).copy_(softmax_results)
        coverage = softmax_results

        return outputs.detach(), coverage
        # return outputs.detach()

    @staticmethod
    def backward(ctx, output_grads, softmax_grads):
        # def backward(ctx, output_grads):
        """
        :param ctx: autograd context carrying everything stashed in forward
        :param output_grads: gradients w.r.t the outputs
        :param softmax_grads: unncessary except we use the attention weights somewhere
        :return: gradients for every forward input (None for non-tensors)
        """
        heads_t, \
            scale_t, \
            matmul2_results, \
            dropout_results, \
            softmax_results, \
            qkv, qkv_mm, qkv_r, \
            rpos_r, rpos_mm, \
            rw_head_q, rr_head_q, \
            inputs, pos, r_head_k, \
            input_weights, pos_weights, output_weights, \
            r_i, s_i, r_p, s_p, \
            r_w_bias, r_r_bias, \
            dropout_mask, \
            dropout_prob_t = ctx.saved_tensors

        head_dim = inputs.size(2) // heads_t[0]
        len_q, bsz = inputs.size(0), inputs.size(1)
        len_r = pos.size(0)

        # Slice out q,k,v from one big Input Linear outuput (should only impact meta data, no copies!)
        # input_lin_results: [len_q, bsz, heads(16), 3, head_dim(64)]
        # input_lin_results: [len_q, batches=bsz*heads, 3, head_dim]
        qkv = qkv.view(inputs.size(0), inputs.size(1) * heads_t[0], 3, head_dim)
        queries = qkv[:, :, 0, :]
        keys = qkv[:, :, 1, :]
        values = qkv[:, :, 2, :]

        # The tensor is declared before hand to properly slice out query, key, and value grads.
        qkv_grads = torch.empty_like(qkv)
        queries_grads = qkv_grads[:, :, 0, :]
        keys_grads = qkv_grads[:, :, 1, :]
        values_grads = qkv_grads[:, :, 2, :]

        # Output Linear Projection
        o_input = matmul2_results

        # output_lin_grads, output_weights_grads, output_biases_grads, r_o_grads, s_o_grads \
        #     = mm.backward(output_grads, o_input, o_r, o_mm, output_weights, r_o, s_o)
        output_lin_grads = torch.mm(
            output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), output_weights)
        output_lin_grads = output_lin_grads.view(output_grads.size(0), output_grads.size(1), output_weights.size(1))
        output_weights_grads = torch.mm(
            output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)).transpose(0, 1),
            matmul2_results.view(matmul2_results.size(0) * matmul2_results.size(1), matmul2_results.size(2)))
        output_lin_grads = output_lin_grads.view(inputs.size(0), inputs.size(1) * heads_t[0], head_dim).transpose(0, 1)
        output_biases_grads = torch.sum(
            output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), 0)

        # Matmul2 - DGRAD1
        # Input1: (data grads) [len_q, bsz*heads, head_dim] transpose(0,1)
        # Input2: (activations) [seql_k, bsz*heads, head_dim] transpose(0,1).transpose(1,2)
        # Output: [bsz*heads, len_q, seql_k]
        # GEMM: Per batch: ( len_q x head_dim ) x ( head_dim x seql_k ) = ( len_q x seql_k )
        matmul2_dgrad1 = torch.bmm(output_lin_grads, values.transpose(0, 1).transpose(1, 2))
        # Matmul2 - DGRAD2
        # Input1: (data grads) [len_q, bsz*heads, head_dim] transpose(0,1)
        # Input2: (activations) [seql_k, bsz*heads, head_dim] transpose(0,1).transpose(1,2)
        # Output: [bsz*heads, len_q, seql_k]
        # GEMM: Per batch: ( len_q x head_dim ) x ( head_dim x seql_k ) = ( len_q x seql_k )
        torch.bmm(dropout_results.transpose(1, 2), output_lin_grads, out=values_grads.transpose(0, 1))
        # print("Reached here")

        # Mask and Scaling for Dropout (not a publically documented op)
        if dropout_prob_t[0] > 0.0:
            dropout_grads = torch._masked_scale(matmul2_dgrad1, dropout_mask, 1.0 / (1.0 - dropout_prob_t[0]))
        else:
            dropout_grads = matmul2_dgrad1

        # Softmax Grad (not a publically documented op)
        softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results)

        attn_score_grads = softmax_grads
        # the grads are evenly distributed to AC and BD
        matmul_ac_grads = attn_score_grads

        # Matmul1 - DGRAD1
        # Input1: (data grads) [bsz*heads, len_q, seql_k]
        # Input2: (activations) [seql_k, bsz*heads, head_dim] transpose(0,1)
        # Output: [bsz*heads, len_q, head_dim] transpose(0,1)
        # GEMM: Per batch: ( len_q x seql_k ) x ( seql_k x head_dim ) = ( len_q x head_dim )
        torch.baddbmm(queries_grads.transpose(0, 1), matmul_ac_grads, keys.transpose(0, 1),
                      out=queries_grads.transpose(0, 1), beta=0.0, alpha=scale_t[0])

        queries_grads_ac = queries_grads
        r_w_bias_grads = torch.sum(queries_grads_ac.view(len_q, bsz, heads_t[0], -1), dim=[0, 1])  # heads * head_dim

        matmul_bd_grads = attn_score_grads

        if len_r > len_q:  # if we cut off the BDs from before, then put the zero gradients behind
            grad_cut = matmul_bd_grads.new_zeros((matmul_bd_grads.size(0), matmul_bd_grads.size(1), len_r - len_q))
            matmul_bd_grads = torch.cat([matmul_bd_grads, grad_cut], dim=-1)

        # backprop through the shifting
        matmul_bd_grads = RelativeShift.backward(matmul_bd_grads, True, False)

        # Matmul1 - DGRAD1
        # Input1: (matmul_bd_grads) [bsz*heads, len_q, seql_k]
        # Input2: (r_head_k) [len_q, bsz*heads, head_dim] transpose(0,1)
        # Output: [bsz*heads, len_q, head_dim] transpose(0,1)
        # GEMM: Per batch: ( len_q x seql_k ) x ( seql_k x head_dim ) = ( len_q x head_dim )
        queries_grads_bd = queries_grads.new_empty(*queries_grads.size())
        torch.baddbmm(queries_grads_bd.transpose(0, 1), matmul_bd_grads, r_head_k.transpose(0, 1),
                      out=queries_grads_bd.transpose(0, 1), beta=0.0, alpha=scale_t[0])

        # len_q x batch*heads x d_head
        r_r_bias_grads = torch.sum(queries_grads_bd.view(len_q, bsz, heads_t[0], -1), dim=[0, 1])

        # add the gradients from bd to queries
        queries_grads.add_(queries_grads_bd)

        # # MatmulAC - DGAD2
        # Input1: (data grads) [bsz*heads, len_q, seql_k] transpose(1,2)
        # Input2: (rw_head_q) [bsz*heads, head_dim, len_q] transpose(0,1)
        # Output: [seql_k, bsz*heads, head_dim] transpose(0,1)
        # GEMM: Per batch: ( seql_k x len_q ) x ( len_q x head_dim ) = ( seql_k x head_dim )
        torch.baddbmm(keys_grads.transpose(0, 1), matmul_ac_grads.transpose(1, 2),
                      rw_head_q.transpose(0, 1), out=keys_grads.transpose(0, 1),
                      beta=0.0, alpha=scale_t[0])

        # MatmulBD - DGRAD2
        # Input1: (data grads) [bsz*heads, len_q, len_r] transpose(1,2)
        # Input2: (rr_head_q) [len_q, bsz*heads, head_dim] transpose(0,1)
        # Output: r_head_k [len_r, bsz*heads, head_dim] transpose(0,1)
        # GEMM: Per batch: ( seql_k x len_q ) x ( len_q x head_dim ) = ( seql_k x head_dim )
        r_head_k_grad = r_head_k.new_empty((len_r, bsz * heads_t[0], head_dim))
        # rr_head_q = queries.view(len_q, bsz, heads_t[0], head_dim) + r_r_bias
        # rr_head_q = rr_head_q.view(len_q, bsz * heads_t[0], head_dim)
        torch.baddbmm(r_head_k_grad.transpose(0, 1), matmul_bd_grads.transpose(1, 2).contiguous(),
                      rr_head_q.transpose(0, 1), out=r_head_k_grad.transpose(0, 1), beta=0.0, alpha=scale_t[0])
        # r_head_k_grad = torch.matmul(matmul_bd_grads.transpose(1, 2), rr_head_q.transpose(0, 1))
        r_head_k_grad = r_head_k_grad.view(len_r, bsz, heads_t[0] * head_dim)

        # Input Linear GEMM - DGRAD
        # input1: (data grads) [len_q, bsz, 3*embed_dim(3072)]
        # input2: (weights) [embed_dim*3 (3072), embed_dim (1024)]
        # output: [len_q, bsz, embed_dim]
        # GEMM: ( (len_q*bsz) x 3*embed_dim ) x ( 3*embed_dim x embed_dim ) = (len_q*bsz x embed_dim)
        qkv_grads = qkv_grads.view(inputs.size(0), inputs.size(1), heads_t[0] * 3 * head_dim)
        input_grads, input_weights_grads, input_biases_grads, r_i_grads, s_i_grads = \
            mm.backward(qkv_grads, inputs, qkv_r, qkv_mm, input_weights, r_i, s_i)
        _, pos_weights_grads, pos_biases_grads, r_p_grads, s_p_grads = \
            mm.backward(r_head_k_grad, pos, rpos_r, rpos_mm, pos_weights, r_p, s_p, need_grad_x=False)

        # One gradient slot per forward input; None for the non-tensor flags.
        return input_grads, None, None, None, None, None, \
            input_weights_grads, output_weights_grads, pos_weights_grads, \
            input_biases_grads, output_biases_grads, pos_biases_grads, \
            r_i_grads, s_i_grads, r_p_grads, s_p_grads, \
            r_w_bias_grads, r_r_bias_grads, \
            None, None, None, None, None


relative_self_attn_func = RelativeSelfAttnFunc.apply
class BERelativeSelfMultiheadAttn(nn.Module):
    """Batch-ensemble multi-headed self-attention with relative positions.

    See "Attention Is All You Need" for more details.
    """

    def __init__(self, embed_dim, num_heads, dropout=0., ensemble=1):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        self.bias = True
        # Shared (slow) weights: fused QKV input projection, output projection
        # and the projection applied to the relative-position encodings.
        self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
        self.out_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
        self.pos_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
        self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
        self.out_proj_bias = Parameter(torch.Tensor(embed_dim))
        self.pos_proj_bias = Parameter(torch.Tensor(embed_dim))
        # Batch-ensemble (fast) rank-1 weights, one row per ensemble member:
        # r_* scales a projection's input, s_* scales its output.
        self.r_i = torch.nn.Parameter(torch.Tensor(ensemble, embed_dim))
        self.s_i = torch.nn.Parameter(torch.Tensor(ensemble, 3 * embed_dim))
        # self.r_o = torch.nn.Parameter(torch.Tensor(ensemble, embed_dim))
        # self.s_o = torch.nn.Parameter(torch.Tensor(ensemble, embed_dim))
        self.r_p = torch.nn.Parameter(torch.Tensor(ensemble, embed_dim))
        self.s_p = torch.nn.Parameter(torch.Tensor(ensemble, embed_dim))
        # Global content/position biases (Transformer-XL style r_w / r_r).
        self.r_w_bias = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim))
        self.r_r_bias = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim))
        self.reset_parameters()
        self.attn_func = RelativeSelfAttnFunc.apply

    def reset_parameters(self, init='normal'):
        # nn.init.xavier_uniform_(self.in_proj_weight, gain=math.sqrt(2))
        # nn.init.xavier_uniform_(self.out_proj_weight)
        if init == 'normal':  # xavier normal
            std_ = math.sqrt(2.0 / (self.embed_dim + self.embed_dim))
            nn.init.normal_(self.in_proj_weight, 0.0, std_)
            nn.init.normal_(self.out_proj_weight, 0.0, std_)
            nn.init.normal_(self.pos_proj_weight, 0.0, std_)
        else:
            # xavier uniform
            std_ = math.sqrt(6.0 / (self.embed_dim + self.embed_dim))
            nn.init.uniform_(self.in_proj_weight, -std_, std_)
            nn.init.uniform_(self.out_proj_weight, -std_, std_)
            nn.init.uniform_(self.pos_proj_weight, -std_, std_)
        nn.init.constant_(self.in_proj_bias, 0.)
        nn.init.constant_(self.out_proj_bias, 0.)
        nn.init.constant_(self.pos_proj_bias, 0.)
        nn.init.normal_(self.r_w_bias, 0.0, 0.02)
        nn.init.normal_(self.r_r_bias, 0.0, 0.02)
        with torch.no_grad():
            # Random sign (+1/-1) vectors: the usual batch-ensemble init.
            self.r_i.bernoulli_(0.5).mul_(-2).add_(1)
            self.s_i.bernoulli_(0.5).mul_(-2).add_(1)
            # self.r_o.bernoulli_(0.5).mul_(-2).add_(1)
            # self.s_o.bernoulli_(0.5).mul_(-2).add_(1)
            self.r_p.bernoulli_(0.5).mul_(-2).add_(1)
            self.s_p.bernoulli_(0.5).mul_(-2).add_(1)

    def forward(self, input, pos, key_padding_mask=None, attn_mask=None, indices=None, mems=None,
                incremental=False, incremental_cache=None, double_precision=False):
        """Run batch-ensemble relative self-attention.

        :param input: [T x B x H] sequence (queries, keys and values)
        :param pos: relative position encodings
        :param key_padding_mask: padding mask (mutually exclusive with attn_mask)
        :param attn_mask: attention mask
        :param indices: optional per-sample ensemble-member index selection
        :return: (outputs, coverage/attention weights)
        """
        bsz = input.size(1)
        ensemble = self.r_i.size(0)
        if key_padding_mask is not None:
            assert (attn_mask is None), "ERROR attn_mask and key_padding_mask should not be both defined!"
            mask = key_padding_mask
            if len(mask.shape) == 3:
                mask = mask.squeeze(0).transpose(0, 1)
        elif attn_mask is not None:
            mask = attn_mask
            if len(mask.shape) == 3:
                mask = mask.squeeze(-1)
        else:
            mask = None
        if self.training:
            # Training: each batch element uses the fast weights of a single
            # ensemble member (round-robin assignment when indices are absent).
            if indices is None:
                with torch.no_grad():
                    indices = torch.arange(0, bsz, device=input.device, dtype=torch.long)
                    indices = torch.remainder(indices, ensemble)
            r_i = torch.index_select(self.r_i, 0, indices)
            s_i = torch.index_select(self.s_i, 0, indices)
            # r_o = torch.index_select(self.r_o, 0, indices)
            # s_o = torch.index_select(self.s_o, 0, indices)
            r_p = torch.index_select(self.r_p, 0, indices)
            s_p = torch.index_select(self.s_p, 0, indices)
        else:
            # Evaluation: replicate the batch for every ensemble member;
            # presumably the attention function averages the members
            # internally (as the encoder-decoder variant does) — verify.
            input = input.repeat(1, ensemble, 1)
            pos = pos.repeat(1, ensemble, 1)
            # if key_padding_mask is not None:
            #     mask = mask.repeat(ensemble, 1)
            r_i = self.r_i.repeat(bsz, 1).view(bsz, ensemble, self.r_i.size(-1)). \
                transpose(0, 1).contiguous().view(-1, self.r_i.size(-1))
            s_i = self.s_i.repeat(bsz, 1).view(bsz, ensemble, self.s_i.size(-1)). \
                transpose(0, 1).contiguous().view(-1, self.s_i.size(-1))
            r_p = self.r_p.repeat(bsz, 1).view(bsz, ensemble, self.r_p.size(-1)). \
                transpose(0, 1).contiguous().view(-1, self.r_p.size(-1))
            s_p = self.s_p.repeat(bsz, 1).view(bsz, ensemble, self.s_p.size(-1)). \
                transpose(0, 1).contiguous().view(-1, self.s_p.size(-1))
            # r_o = self.r_o.repeat(bsz, 1).view(bsz, ensemble, self.r_o.size(-1)). \
            #     transpose(0, 1).contiguous().view(-1, self.r_o.size(-1))
            # s_o = self.s_o.repeat(bsz, 1).view(bsz, ensemble, self.s_o.size(-1)). \
            #     transpose(0, 1).contiguous().view(-1, self.r_o.size(-1))
        is_training = self.training
        outputs, coverage = self.attn_func(input, pos, attn_mask is not None, is_training, self.num_heads, ensemble,
                                           self.in_proj_weight, self.out_proj_weight, self.pos_proj_weight,
                                           self.in_proj_bias, self.out_proj_bias, self.pos_proj_bias,
                                           r_i, s_i, r_p, s_p,
                                           self.r_w_bias, self.r_r_bias,
                                           mask, self.dropout,
                                           incremental, incremental_cache, double_precision)
        # last False is double precision
        return outputs, coverage
if __name__ == "__main__":
    # Gradcheck harness for RelativeSelfAttnFunc (double precision, on GPU).
    bsz = 4
    seq_len_q = 4
    seq_len_kv = 7
    embed_dim = 32
    n_heads = 4
    output_size = 32
    ensemble = 7

    class TestNetwork(nn.Module):
        """Thin wrapper so torch.autograd.gradcheck can drive the function."""

        def __init__(self):
            super(TestNetwork, self).__init__()
            self.func = relative_self_attn_func
            self.n_heads = n_heads
            self.ensemble = ensemble

        def forward(self, q, r, input_weights, output_weights, pos_weights,
                    input_biases, output_biases, pos_biases,
                    r_i, s_i, r_p, s_p,
                    r_w_bias, r_r_bias):
            use_time_mask = False
            mask = None
            is_training = True
            incremental = False
            incremental_cache = None
            double_precision = True
            dropout_prob = 0.0
            heads = self.n_heads
            # BUGFIX: the argument list must match RelativeSelfAttnFunc.forward
            # exactly as called by BERelativeSelfMultiheadAttn.forward: the
            # `ensemble` count follows `heads`, and there are no r_o/s_o
            # parameters (the function's backward returns 23 gradients, so the
            # forward takes 23 inputs).  The previous call passed 24 arguments.
            output, coverage = self.func(q, r, use_time_mask, is_training, heads, self.ensemble,
                                         input_weights, output_weights, pos_weights,
                                         input_biases, output_biases, pos_biases,
                                         r_i, s_i, r_p, s_p,
                                         r_w_bias, r_r_bias,
                                         mask, dropout_prob,
                                         incremental, incremental_cache, double_precision)
            return output

    r_w_bias = nn.Parameter(torch.Tensor(n_heads, embed_dim // n_heads)).double().cuda()
    r_r_bias = nn.Parameter(torch.Tensor(n_heads, embed_dim // n_heads)).double().cuda()
    in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim)).double().cuda()
    pos_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)).double().cuda()
    out_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)).double().cuda()
    in_proj_bias = Parameter(torch.Tensor(3 * embed_dim)).double().cuda()
    pos_proj_bias = Parameter(torch.Tensor(embed_dim)).double().cuda()
    out_proj_bias = Parameter(torch.Tensor(embed_dim)).double().cuda()
    # Per-sample batch-ensemble fast weights (already indexed per batch row,
    # as the training path of the module would produce them).
    r_i = torch.nn.Parameter(torch.Tensor(bsz, embed_dim)).double().cuda()
    s_i = torch.nn.Parameter(torch.Tensor(bsz, 3 * embed_dim)).double().cuda()
    r_p = torch.nn.Parameter(torch.Tensor(bsz, embed_dim)).double().cuda()
    s_p = torch.nn.Parameter(torch.Tensor(bsz, embed_dim)).double().cuda()

    std_ = math.sqrt(2.0 / (embed_dim + embed_dim))
    nn.init.normal_(in_proj_weight, 0.0, std_)
    nn.init.normal_(pos_proj_weight, 0.0, std_)
    nn.init.normal_(out_proj_weight, 0.0, std_)
    nn.init.normal_(r_w_bias, 0.0, std_)
    nn.init.normal_(r_r_bias, 0.0, std_)
    torch.nn.init.constant_(in_proj_bias, 0.0)
    torch.nn.init.constant_(out_proj_bias, 0.0)
    torch.nn.init.constant_(pos_proj_bias, 0.0)
    with torch.no_grad():
        # Random sign vectors, the standard batch-ensemble initialisation.
        r_i.bernoulli_(0.5).mul_(-2).add_(1)
        s_i.bernoulli_(0.5).mul_(-2).add_(1)
        r_p.bernoulli_(0.5).mul_(-2).add_(1)
        s_p.bernoulli_(0.5).mul_(-2).add_(1)

    model = TestNetwork()
    q = torch.randn((seq_len_q, bsz, embed_dim), requires_grad=True)
    r = torch.randn((seq_len_kv, bsz, embed_dim), requires_grad=False)
    model = model.double().cuda()
    q = q.double().cuda()
    r = r.double().cuda()

    print("Gradchecking ...")
    torch.autograd.gradcheck(model, (q, r, in_proj_weight, out_proj_weight, pos_proj_weight,
                                     in_proj_bias, out_proj_bias, pos_proj_bias,
                                     r_i, s_i, r_p, s_p,
                                     r_w_bias, r_r_bias))
    print("Gradcheck successful!!!")
NMTGMinor | NMTGMinor-master/onmt/modules/batch_ensemble/be_encdec_attention.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
import math
# from onmt.constants import double_precision
# from .batch_ensemble_linear import BatchEnsembleMM as mm
class BatchEnsembleMM(object):
    """Shared-weight GEMM with per-batch-element rank-1 fast weights.

    Implements y = (x * r) @ W^T * s + b, where r and s are the batch-ensemble
    fast weights, as explicit forward/backward routines so a custom autograd
    Function can reuse the intermediates.
    """

    @staticmethod
    def forward(x, weight, bias, ensemble_r, ensemble_s):
        """
        :param x: [T x B x H]
        :param weight: [H_out x H]
        :param bias: [H_out]
        :param ensemble_r: [B x H] input scaling vectors
        :param ensemble_s: [B x H_out] output scaling vectors
        :return: (output, pre-scaling GEMM output, scaled input)
        """
        seq_len, n_batch, dim_in = x.size(0), x.size(1), x.size(2)
        dim_out = weight.size(0)
        assert n_batch == ensemble_s.size(0)
        # scale every batch element by its fast weight r: [T x B x H] * [B x H]
        x_r = x * ensemble_r
        # shared GEMM without bias (bias is added after the s-scaling)
        x_mm = torch.mm(x_r.view(-1, dim_in), weight.t()).view(seq_len, n_batch, dim_out)
        # scale by the fast weight s, then add the shared bias
        x_s = x_mm * ensemble_s + bias
        # the intermediates are needed again in the backward pass
        return x_s, x_mm, x_r

    @staticmethod
    def backward(grad_y, x, x_r, x_mm, weight, ensemble_r, ensemble_s):
        """Manual gradients for :meth:`forward` given upstream grad_y."""
        seq_len, n_batch, dim_in = x.size(0), x.size(1), x.size(2)
        dim_out = x_mm.size(-1)
        # bias is broadcast over time and batch, so reduce over both
        grad_bias = torch.sum(grad_y, (0, 1))
        # gradient of the post-GEMM scaling vector s
        grad_ensemble_s = torch.sum(grad_y * x_mm, dim=0)
        # gradient flowing into the GEMM output
        grad_mm = (grad_y * ensemble_s).view(-1, dim_out)
        grad_r = torch.mm(grad_mm, weight).view(seq_len, n_batch, dim_in)
        # shared weight gradient: [H_out x (T*B)] x [(T*B) x H_in]
        grad_weight = torch.mm(grad_mm.t(), x_r.view(-1, dim_in))
        # back through the pre-GEMM scaling
        grad_x = grad_r * ensemble_r
        grad_ensemble_r = torch.sum(grad_r * x, dim=0)
        return grad_x, grad_weight, grad_bias, grad_ensemble_r, grad_ensemble_s
# Short alias used throughout EncdecAttnFunc below.
mm = BatchEnsembleMM
class EncdecAttnFunc(torch.autograd.Function):
    """Fused encoder-decoder attention with batch-ensemble Q/KV projections.

    The query and key/value input projections go through ``mm``
    (BatchEnsembleMM): each batch element is scaled by per-ensemble-member
    rank-1 fast weights around the shared GEMM.  The output projection is a
    plain linear layer.  At inference (``is_training=False``) the batch is
    expected to arrive replicated ``ensemble`` times and the projections are
    averaged over the ensemble members.
    """

    @staticmethod
    def forward(ctx, use_time_mask, is_training, heads, ensemble,
                inputs_q, inputs_kv,
                input_weights_q, input_weights_kv, output_weights,
                input_biases_q, input_biases_kv, output_biases,
                r_q, s_q, r_kv, s_kv,
                mask, dropout_prob,
                incremental, incremental_cache, double_precision):
        heads_t = torch.tensor([heads])
        dropout_prob_t = torch.tensor([dropout_prob])
        null_tensor = torch.tensor([])
        head_dim = inputs_q.size(2) // heads
        scale_t = torch.tensor([head_dim ** -0.5])

        bsz, len_q, len_k = inputs_q.size(1), inputs_q.size(0), inputs_kv.size(0)
        if not is_training:
            # the batch arrives replicated `ensemble` times at inference
            bsz = bsz // ensemble

        # Linear Projection Q (batch-ensemble)
        q, q_mm, q_r = mm.forward(inputs_q, input_weights_q, input_biases_q, r_q, s_q)
        if not is_training:
            # average the query projections over the ensemble members
            q = q.view(len_q, ensemble, bsz, q.size(-1))
            q = torch.mean(q, dim=1)
        queries = q.view(q.size(0), q.size(1) * heads, head_dim)

        # Linear Projection KV (batch-ensemble), served from the decoding
        # cache when available
        if incremental and ('c_k' in incremental_cache and 'c_v' in incremental_cache):
            keys = incremental_cache['c_k']
            values = incremental_cache['c_v']
            keys = keys.view(len_k, bsz * heads, head_dim)
            values = values.view(len_k, bsz * heads, head_dim)
            kv = torch.stack([keys, values], dim=-2)
            # BUGFIX: this branch produced no GEMM intermediates, so the
            # ctx.save_for_backward call below raised NameError on kv_mm/kv_r.
            # Backward is never run during incremental decoding, so empty
            # placeholders are sufficient.
            kv_mm, kv_r = null_tensor, null_tensor
        else:
            kv, kv_mm, kv_r = mm.forward(inputs_kv, input_weights_kv, input_biases_kv, r_kv, s_kv)
            if not is_training:
                # average the key/value projections over the ensemble members
                kv = kv.view(kv.size(0), ensemble, kv.size(1) // ensemble, kv.size(-1))
                kv = torch.mean(kv, dim=1)
            kv = kv.view(kv.size(0), kv.size(1) * heads, 2, head_dim)
            keys = kv[:, :, 0, :]
            values = kv[:, :, 1, :]
            if incremental:
                keys = keys.contiguous().view(len_k, bsz, heads * head_dim)
                values = values.contiguous().view(len_k, bsz, heads * head_dim)
                incremental_cache['c_k'] = keys
                incremental_cache['c_v'] = values
                keys = keys.view(len_k, bsz * heads, head_dim)
                values = values.view(len_k, bsz * heads, head_dim)

        # Matmul1 Batched GEMMs
        # The output tensor is specified prior to the Batch GEMM because baddbmm requires its specification
        # baddbmm is used to apply the scale parameter via the Batched GEMM's alpha parameter instead of
        # a separate elementwise operation.
        # Input1: (Queries) [seql_q, seqs*heads, head_dim] transpose(0,1)
        # Input2: (Keys) [seql_k, seqs*heads, head_dim] transpose(0,1)
        # output: [seqs*heads, seql_q, seql_k]
        # GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
        if queries.is_cuda:
            matmul1_results = torch.empty((queries.size(1), queries.size(0), keys.size(0)), dtype=queries.dtype,
                                          device=queries.device)
            matmul1_results = torch.baddbmm(matmul1_results, queries.transpose(0, 1),
                                            keys.transpose(0, 1).transpose(1, 2),
                                            out=matmul1_results, beta=0.0, alpha=scale_t[0])
        else:
            matmul1_results = torch.matmul(queries.transpose(0, 1), keys.transpose(0, 1).transpose(1, 2))
            matmul1_results.mul_(scale_t[0])

        if mask is not None:
            # Self Attention Time Mask
            mask = mask.to(torch.bool)
            if len(mask.shape) == 3:
                mask = mask.unsqueeze(1)  # for the head dimension
            else:
                mask = mask.unsqueeze(1).unsqueeze(2)  # for the head and query dimension
            batches, seql_q, seql_k = matmul1_results.size()
            bsz = int(batches / heads)
            matmul1_results = matmul1_results.view(bsz, heads, seql_q, seql_k)
            mask = mask.to(torch.bool)
            # after unsqueezing the mask should have size [bsz x 1 x 1 x seql_k]
            matmul1_results = matmul1_results.masked_fill_(mask, float('-inf'))
            matmul1_results = matmul1_results.view(bsz * heads, seql_q, seql_k)

        # softmax is computed in higher precision for numerical stability
        dtype_ = torch.float64 if double_precision else torch.float32
        softmax_results = F.softmax(matmul1_results, dim=-1, dtype=dtype_).type_as(matmul1_results)
        # softmax_results = F.softmax(matmul1_results.float(), dim=-1).type_as(matmul1_results)

        # Dropout - is not executed for inference
        if is_training:
            dropout_results, dropout_mask = torch._fused_dropout(softmax_results, p=(1. - dropout_prob_t[0]))
        else:
            dropout_results = softmax_results
            dropout_mask = null_tensor

        # Matmul2 Batched GEMMs
        # The output tensor specification is needed here to specify the non-standard output.
        # Given that pytorch cannot currently perform autograd with an output tensor specified,
        # this requires a backward pass specified.
        # Input1: from_softmax [seqs*heads, seql_q, seql_k]
        # Input2: (values) [seql_v, seqs*heads, head_dim] transpose(0,1)
        # Output: [seql_q, seqs*heads, head_dim] transpose(0,1)
        # GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = (seql_q x head_dim)
        if queries.is_cuda:
            matmul2_results = torch.empty((dropout_results.size(1), dropout_results.size(0), values.size(2)),
                                          dtype=dropout_results.dtype, device=dropout_results.device).transpose(1, 0)
            matmul2_results = torch.bmm(dropout_results, values.transpose(0, 1), out=matmul2_results)
        else:
            matmul2_results = torch.matmul(dropout_results, values.transpose(0, 1))
        matmul2_results = matmul2_results.transpose(0, 1).contiguous().view(len_q, bsz, inputs_q.size(2))

        # Output Linear Projection (plain addmm: no batch-ensemble fast
        # weights on the output side)
        o_input = matmul2_results
        o = torch.addmm(output_biases,
                        o_input.view(len_q * bsz, o_input.size(2)),
                        output_weights.transpose(0, 1),
                        beta=1., alpha=1.)
        outputs = o.view(len_q, bsz, output_weights.size(0))

        ctx.save_for_backward(heads_t,
                              scale_t,
                              matmul2_results,
                              dropout_results,
                              softmax_results,
                              q, q_mm, q_r,
                              kv, kv_mm, kv_r,
                              inputs_q,
                              inputs_kv,
                              input_weights_q, input_biases_q, r_q, s_q,
                              input_weights_kv, input_biases_kv, r_kv, s_kv,
                              output_weights, output_biases,
                              dropout_mask,
                              dropout_prob_t)

        # return a detached copy of the attention weights as coverage
        with torch.no_grad():
            softmax_results = softmax_results.new(*softmax_results.size()).copy_(softmax_results)
        return outputs.detach(), softmax_results

    @staticmethod
    def backward(ctx, output_grads, softmax_grads):
        heads_t, scale_t, matmul2_results, dropout_results, softmax_results \
            , q, q_mm, q_r, kv, kv_mm, kv_r \
            , inputs_q, inputs_kv \
            , input_weights_q, input_biases_q, r_q, s_q \
            , input_weights_kv, input_biases_kv, r_kv, s_kv \
            , output_weights, output_biases \
            , dropout_mask, dropout_prob_t \
            = ctx.saved_tensors

        head_dim = inputs_q.size(2) // heads_t[0]

        # Slice out k,v from one big Input Linear output (should only impact meta data, no copies!)
        # Batch sizes and heads are combined to make the batch of the Batched GEMM
        # input_lin_kv_results: [seql_k, bsz, heads(16), 2, head_dim(64)]
        # input_lin_kv_results: [seql_k, batches=bsz*heads, 2, head_dim]
        queries = q.view(inputs_q.size(0), inputs_q.size(1) * heads_t[0], head_dim)
        kv = kv.view(inputs_kv.size(0), inputs_kv.size(1) * heads_t[0], 2, head_dim)
        keys = kv[:, :, 0, :]
        values = kv[:, :, 1, :]

        # Slice out k,v from one big set of gradients entering the input linear's bprop
        # (should only impact meta data, no copies!)
        # The gradients are identical in size to the Input Linear outputs.
        # The tensor is declared before hand to properly slice out query, key, and value grads.
        kv_grads = torch.empty_like(kv)
        queries_grads = torch.empty_like(queries)
        keys_grads = kv_grads[:, :, 0, :]
        values_grads = kv_grads[:, :, 1, :]

        # Output Linear Projection gradients (plain linear)
        o_input = matmul2_results
        output_lin_grads = torch.mm(
            output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), output_weights)
        output_lin_grads = output_lin_grads.view(output_grads.size(0), output_grads.size(1), output_weights.size(1))
        output_weights_grads = torch.mm(
            output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)).transpose(0, 1),
            matmul2_results.view(matmul2_results.size(0) * matmul2_results.size(1), matmul2_results.size(2)))
        output_biases_grads = torch.sum(
            output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), 0)
        output_lin_grads = output_lin_grads.view(output_grads.size(0), output_grads.size(1) * heads_t[0],
                                                 head_dim).transpose(0, 1)

        # Matmul2 - DGRAD1
        # Input1: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
        # Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1).transpose(1,2)
        # Output: [seqs*heads, seql_q, seql_k]
        # GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
        matmul2_dgrad1 = torch.bmm(output_lin_grads, values.transpose(0, 1).transpose(1, 2))
        # Matmul2 - DGRAD2
        # GEMM: Per batch: ( seql_k x seql_q ) x ( seql_q x head_dim ) = ( seql_k x head_dim )
        torch.bmm(dropout_results.transpose(1, 2), output_lin_grads, out=values_grads.transpose(0, 1))

        # Mask and Scaling for Dropout (not a publically documented op)
        dropout_grads = torch._masked_scale(matmul2_dgrad1, dropout_mask, 1.0 / (1.0 - dropout_prob_t[0]))

        # Softmax Grad (not a publically documented op)
        softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results)

        # Matmul1 - DGRAD1
        # Input1: (data grads) [seqs*heads, seql_q, seql_k]
        # Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1)
        # Output: [seqs*heads, seql_q, head_dim] transpose(0,1)
        # GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = ( seql_q x head_dim )
        queries_grads = torch.baddbmm(queries_grads.transpose(0, 1), softmax_grads, keys.transpose(0, 1),
                                      out=queries_grads.transpose(0, 1), beta=0.0, alpha=scale_t[0])
        # Matmul1 - DGRAD2
        # GEMM: Per batch: ( seql_k x seql_q ) x ( seql_q x head_dim ) = ( seql_k x head_dim )
        torch.baddbmm(keys_grads.transpose(0, 1), softmax_grads.transpose(1, 2), queries.transpose(0, 1),
                      out=keys_grads.transpose(0, 1), beta=0.0, alpha=scale_t[0])

        # Input Q Linear GEMM - DGRAD through the batch-ensemble projection
        queries_grads = queries_grads.transpose(0, 1).view(inputs_q.size(0), inputs_q.size(1), heads_t[0] * head_dim)
        inputs_q_grads, input_weights_q_grads, input_biases_q_grads, r_q_grads, s_q_grads \
            = mm.backward(queries_grads, inputs_q, q_r, q_mm, input_weights_q, r_q, s_q)

        kv_grads = kv_grads.view(inputs_kv.size(0), inputs_kv.size(1), heads_t[0] * 2 * head_dim)
        inputs_kv_grads, input_weights_kv_grads, input_biases_kv_grads, r_kv_grads, s_kv_grads \
            = mm.backward(kv_grads, inputs_kv, kv_r, kv_mm, input_weights_kv, r_kv, s_kv)

        # One gradient per forward input (None for non-tensor arguments).
        return None, None, None, None \
            , inputs_q_grads, inputs_kv_grads \
            , input_weights_q_grads, input_weights_kv_grads, output_weights_grads \
            , input_biases_q_grads, input_biases_kv_grads, output_biases_grads \
            , r_q_grads, s_q_grads, r_kv_grads, s_kv_grads \
            , None, None, None, None, None
# encdec_attn_func = EncdecAttnFunc.apply
class BEEncdecMultiheadAttn(nn.Module):
    """Batch-ensemble multi-headed encoder-decoder attention.

    See "Attention Is All You Need" for more details.
    """

    def __init__(self, num_heads, embed_dim, attn_drop=0., ensemble=1):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = attn_drop
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        self.bias = False
        self.scaling = self.head_dim ** -0.5  # this value is hardcoded in the "fast" implementation
        # Shared (slow) weights: separate Q projection, fused KV projection
        # and the output projection.
        self.in_proj_weight_q = Parameter(torch.Tensor(embed_dim, embed_dim))
        self.in_proj_weight_kv = Parameter(torch.Tensor(2 * embed_dim, embed_dim))
        self.out_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
        self.in_proj_bias_q = Parameter(torch.Tensor(embed_dim))
        self.in_proj_bias_kv = Parameter(torch.Tensor(2 * embed_dim))
        self.out_proj_bias = Parameter(torch.Tensor(embed_dim))
        # Batch-ensemble (fast) rank-1 weights, one row per ensemble member:
        # r_* scales a projection's input, s_* scales its output.
        self.r_q = torch.nn.Parameter(torch.Tensor(ensemble, embed_dim))
        self.s_q = torch.nn.Parameter(torch.Tensor(ensemble, embed_dim))
        self.r_kv = torch.nn.Parameter(torch.Tensor(ensemble, embed_dim))
        self.s_kv = torch.nn.Parameter(torch.Tensor(ensemble, 2 * embed_dim))
        # self.r_o = torch.nn.Parameter(torch.Tensor(ensemble, embed_dim))
        # self.s_o = torch.nn.Parameter(torch.Tensor(ensemble, embed_dim))
        self.attn_func = EncdecAttnFunc.apply
        self.reset_parameters()

    def reset_parameters(self, init='normal'):
        if init == 'normal':  # xavier normal
            std_ = math.sqrt(2.0 / (self.embed_dim + self.embed_dim))
            nn.init.normal_(self.in_proj_weight_q, 0.0, std_)
            nn.init.normal_(self.in_proj_weight_kv, 0.0, std_)
            nn.init.normal_(self.out_proj_weight, 0.0, std_)
        else:  # xavier uniform
            std_ = math.sqrt(6.0 / (self.embed_dim + self.embed_dim))
            nn.init.uniform_(self.in_proj_weight_q, -std_, std_)
            nn.init.uniform_(self.in_proj_weight_kv, -std_, std_)
            nn.init.uniform_(self.out_proj_weight, -std_, std_)
        torch.nn.init.constant_(self.in_proj_bias_q, 0.0)
        torch.nn.init.constant_(self.in_proj_bias_kv, 0.0)
        torch.nn.init.constant_(self.out_proj_bias, 0.0)
        with torch.no_grad():
            # Random sign (+1/-1) vectors: the usual batch-ensemble init.
            self.r_q.bernoulli_(0.5).mul_(-2).add_(1)
            self.s_q.bernoulli_(0.5).mul_(-2).add_(1)
            self.r_kv.bernoulli_(0.5).mul_(-2).add_(1)
            self.s_kv.bernoulli_(0.5).mul_(-2).add_(1)
            # self.r_o.bernoulli_(0.5).mul_(-2).add_(1)
            # self.s_o.bernoulli_(0.5).mul_(-2).add_(1)

    def forward(self, query, key, value, attn_mask=None, incremental=False, incremental_cache=None,
                indices=None, double_precision=False):
        """Run batch-ensemble encoder-decoder attention.

        :param query: [T_q x B x H] decoder states
        :param key: [T_k x B x H] encoder states (must be identical to value)
        :param indices: optional per-sample ensemble-member index selection
        :return: (outputs, coverage/attention weights)
        """
        assert value is key, "ERROR: Keys and values must be the same."
        is_training = self.training
        time_masking = False
        len_key = key.size(0)
        ensemble = self.r_q.size(0)
        bsz = query.size(1)
        if is_training:
            # Training: each batch element uses the fast weights of a single
            # ensemble member (round-robin assignment when indices are absent).
            if indices is None:
                with torch.no_grad():
                    indices = torch.arange(0, bsz, device=query.device, dtype=torch.long)
                    indices = torch.remainder(indices, ensemble)
            r_q = torch.index_select(self.r_q, 0, indices)
            s_q = torch.index_select(self.s_q, 0, indices)
            r_kv = torch.index_select(self.r_kv, 0, indices)
            s_kv = torch.index_select(self.s_kv, 0, indices)
            #
            # r_o = torch.index_select(self.r_o, 0, indices)
            # s_o = torch.index_select(self.s_o, 0, indices)
        else:
            # Evaluation: replicate the batch for every ensemble member;
            # EncdecAttnFunc averages the projections over the members.
            query = query.repeat(1, ensemble, 1)
            key = key.repeat(1, ensemble, 1)
            # attn_mask = attn_mask.repeat(ensemble, 1, 1)
            r_q = self.r_q.repeat(bsz, 1).view(bsz, ensemble, self.r_q.size(-1)).\
                transpose(0, 1).contiguous().view(-1, self.r_q.size(-1))
            s_q = self.s_q.repeat(bsz, 1).view(bsz, ensemble, self.s_q.size(-1)).\
                transpose(0, 1).contiguous().view(-1, self.s_q.size(-1))
            r_kv = self.r_kv.repeat(bsz, 1).view(bsz, ensemble, self.r_kv.size(-1)).\
                transpose(0, 1).contiguous().view(-1, self.r_kv.size(-1))
            s_kv = self.s_kv.repeat(bsz, 1).view(bsz, ensemble, self.s_kv.size(-1)).\
                transpose(0, 1).contiguous().view(-1, self.s_kv.size(-1))
            # r_o = self.r_o.repeat(bsz, 1).view(bsz, ensemble, self.r_o.size(-1)).\
            #     transpose(0, 1).contiguous().view(-1, self.r_o.size(-1))
            # s_o = self.s_o.repeat(bsz, 1).view(bsz, ensemble, self.s_o.size(-1)).\
            #     transpose(0, 1).contiguous().view(-1, self.r_o.size(-1))
        outputs, coverage = self.attn_func(time_masking, is_training, self.num_heads, ensemble,
                                           query, key,
                                           self.in_proj_weight_q, self.in_proj_weight_kv, self.out_proj_weight,
                                           self.in_proj_bias_q, self.in_proj_bias_kv, self.out_proj_bias,
                                           r_q, s_q, r_kv, s_kv, attn_mask, self.dropout,
                                           incremental, incremental_cache, double_precision)
        return outputs, coverage
if __name__ == "__main__":
    # Gradcheck harness for EncdecAttnFunc (double precision, on GPU).
    bsz = 4
    seq_len_q = 4
    seq_len_kv = 4
    embed_dim = 32
    n_heads = 4
    output_size = 32
    ensemble = 7

    class TestNetwork(nn.Module):
        """Thin wrapper so torch.autograd.gradcheck can drive the function."""

        def __init__(self):
            super(TestNetwork, self).__init__()
            self.func = EncdecAttnFunc.apply
            self.n_heads = 4
            self.ensemble = ensemble

        def forward(self, q, kv, input_weights_q, input_weights_kv, output_weights,
                    input_biases_q, input_biases_kv, output_biases,
                    r_q, s_q, r_kv, s_kv):
            use_time_mask = False
            mask = None
            is_training = True
            incremental = False
            incremental_cache = None
            double_precision = True
            dropout_prob = 0.0
            heads = self.n_heads
            # BUGFIX: EncdecAttnFunc.forward takes `ensemble` right after
            # `heads` (its backward returns 21 gradients for 21 inputs); the
            # previous call omitted it and therefore shifted every argument.
            output, coverage = self.func(use_time_mask, is_training, heads, self.ensemble, q, kv,
                                         input_weights_q, input_weights_kv, output_weights,
                                         input_biases_q, input_biases_kv, output_biases,
                                         r_q, s_q, r_kv, s_kv,
                                         mask, dropout_prob,
                                         incremental, incremental_cache, double_precision)
            return output

    in_proj_weight_q = Parameter(torch.Tensor(embed_dim, embed_dim)).double().cuda()
    in_proj_weight_kv = Parameter(torch.Tensor(2 * embed_dim, embed_dim)).double().cuda()
    out_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)).double().cuda()
    in_proj_bias_q = Parameter(torch.Tensor(embed_dim)).double().cuda()
    in_proj_bias_kv = Parameter(torch.Tensor(2 * embed_dim)).double().cuda()
    out_proj_bias = Parameter(torch.Tensor(embed_dim)).double().cuda()
    # Per-sample batch-ensemble fast weights (already indexed per batch row).
    r_q = torch.nn.Parameter(torch.Tensor(bsz, embed_dim)).double().cuda()
    s_q = torch.nn.Parameter(torch.Tensor(bsz, embed_dim)).double().cuda()
    r_kv = torch.nn.Parameter(torch.Tensor(bsz, embed_dim)).double().cuda()
    s_kv = torch.nn.Parameter(torch.Tensor(bsz, 2 * embed_dim)).double().cuda()

    std_ = math.sqrt(2.0 / (embed_dim + embed_dim))
    nn.init.normal_(in_proj_weight_q, 0.0, std_)
    nn.init.normal_(in_proj_weight_kv, 0.0, std_)
    nn.init.normal_(out_proj_weight, 0.0, std_)
    torch.nn.init.constant_(in_proj_bias_q, 0.0)
    torch.nn.init.constant_(in_proj_bias_kv, 0.0)
    torch.nn.init.constant_(out_proj_bias, 0.0)
    with torch.no_grad():
        # Random sign vectors, the standard batch-ensemble initialisation.
        # BUGFIX: the previous code also called r_o.bernoulli_/s_o.bernoulli_
        # on variables whose definitions are commented out (NameError), and
        # passed r_o/s_o to gradcheck although TestNetwork.forward does not
        # accept them.
        r_q.bernoulli_(0.5).mul_(-2).add_(1)
        s_q.bernoulli_(0.5).mul_(-2).add_(1)
        r_kv.bernoulli_(0.5).mul_(-2).add_(1)
        s_kv.bernoulli_(0.5).mul_(-2).add_(1)

    # model = BEEncdecMultiheadAttn(n_heads, embed_dim, 0.0, ensemble)
    model = TestNetwork()
    q = torch.randn((seq_len_q, bsz, embed_dim), requires_grad=True)
    kv = torch.randn((seq_len_kv, bsz, embed_dim), requires_grad=True)
    model = model.double().cuda()
    q = q.double().cuda()
    kv = kv.double().cuda()

    print("Gradchecking ...")
    torch.autograd.gradcheck(model, (q, kv, in_proj_weight_q, in_proj_weight_kv, out_proj_weight,
                                     in_proj_bias_q, in_proj_bias_kv, out_proj_bias,
                                     r_q, s_q, r_kv, s_kv))
    print("Gradcheck successful!!!")
| 26,603 | 46.677419 | 117 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/batch_ensemble/batch_ensemble_linear.py | import torch
import torch.nn.functional as F
from onmt.modules.dropout import variational_dropout
class BatchEnsembleMM(object):
    """Shared-weight GEMM with per-batch-element rank-1 fast weights:
    y = (x * r) @ W^T * s + b, with explicit forward/backward so a custom
    autograd Function can reuse the intermediates."""

    @staticmethod
    def forward(x, weight, bias, ensemble_r, ensemble_s):
        """
        :param x: [T x B x H]
        :param weight: [H_out x H]
        :param bias: [H_out]
        :param ensemble_r: [B x H] input scaling vectors
        :param ensemble_s: [B x H_out] output scaling vectors
        :return: (output [T x B x H_out], pre-scaling GEMM output, scaled input)
        """
        bsz, len_x, hin = x.size(1), x.size(0), x.size(2)
        hout = weight.size(0)
        assert bsz == ensemble_s.size(0)
        # element-wise [T x B x H] \times [B x H]
        x_r = torch.mul(x, ensemble_r)
        # GEMM No Bias. Otherwise use addmm
        x_mm = torch.mm(x_r.view(-1, hin), weight.transpose(0, 1))
        x_mm = x_mm.view(len_x, bsz, hout)
        # element-wise [T x B x Hout] \times [B x Hout]
        x_s = torch.mul(x_mm, ensemble_s)
        # add bias
        x_s = torch.add(x_s, bias)
        # we need to store the intermediate results for the backward pass
        return x_s, x_mm, x_r

    @staticmethod
    def backward(grad_y, x, x_r, x_mm, weight, ensemble_r, ensemble_s):
        """Manual gradients for :meth:`forward` given upstream grad_y [T x B x H_out]."""
        bsz, len_x, hin = x.size(1), x.size(0), x.size(2)
        hout = x_mm.size(-1)
        # BUGFIX: bias is broadcast over the time and batch dimensions in the
        # forward pass, so its gradient must be reduced over both (previously
        # the raw [T x B x H_out] grad_y was returned, which has the wrong
        # shape for the [H_out] bias parameter).
        grad_bias = torch.sum(grad_y, (0, 1))
        grad_s = grad_y
        # backprop through the last element-wise multiplication
        grad_ensemble_s = torch.mul(grad_s, x_mm)
        grad_ensemble_s = torch.sum(grad_ensemble_s, dim=0)
        # backprop through the MM
        grad_mm = torch.mul(grad_s, ensemble_s)
        grad_mm = grad_mm.view(-1, hout)
        grad_r = torch.mm(grad_mm, weight).view(len_x, bsz, hin)
        # GEMM: [hout x (T*B)] \times [(T*B) x hin]
        grad_weight = torch.mm(grad_mm.transpose(0, 1), x_r.view(-1, hin))
        # back prop through the first element-wise multiplication
        # element-wise [len_x, bsz, hin] \cdot [bsz, hin]
        grad_x = torch.mul(grad_r, ensemble_r)
        # grad ensemble r
        grad_ensemble_r = torch.mul(grad_r, x)
        grad_ensemble_r = torch.sum(grad_ensemble_r, dim=0)
        return grad_x, grad_weight, grad_bias, grad_ensemble_r, grad_ensemble_s
class BatchEnsembleLinearFunction(torch.autograd.Function):
    """Autograd wrapper around the explicit BatchEnsembleMM forward/backward."""

    @staticmethod
    def forward(ctx, x, weight, bias, ensemble_r, ensemble_s):
        output, x_mm, x_r = BatchEnsembleMM.forward(x, weight, bias, ensemble_r, ensemble_s)
        # stash inputs plus the GEMM intermediates for the backward pass
        ctx.save_for_backward(x, weight, bias, ensemble_r, ensemble_s, x_mm, x_r)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        x, weight, bias, ensemble_r, ensemble_s, x_mm, x_r = ctx.saved_tensors
        # BatchEnsembleMM.backward already yields one gradient per input,
        # in the (x, weight, bias, ensemble_r, ensemble_s) order.
        return BatchEnsembleMM.backward(grad_output, x, x_r, x_mm, weight, ensemble_r, ensemble_s)
class BatchEnsembleLinear(torch.nn.Module):
    """Linear layer with BatchEnsemble rank-1 parameterization.

    A single shared weight W and bias b are modulated per ensemble member e by
    rank-1 vectors: y_e = s_e * (W (r_e * x)) + b. During training each batch
    entry is assigned one ensemble member (round-robin); during evaluation the
    input is replicated for every member and the outputs are averaged.
    """
    # TODO: write gradcheck testing

    def __init__(self, input_size, output_size, ensemble):
        super().__init__()
        self.weight = torch.nn.Parameter(torch.Tensor(output_size, input_size))
        self.bias = torch.nn.Parameter(torch.Tensor(output_size))
        self.r = torch.nn.Parameter(torch.Tensor(ensemble, input_size))
        self.s = torch.nn.Parameter(torch.Tensor(ensemble, output_size))
        self.reset_parameters()

    def reset_parameters(self, init='normal'):
        if init == 'normal':
            torch.nn.init.xavier_normal_(self.weight)
        else:
            torch.nn.init.xavier_uniform_(self.weight)
        # For batch ensemble, r_i and s_i could be init'd with random sign
        # vectors (see paper); here small Gaussian values are used instead.
        torch.nn.init.normal_(self.r, 0.0, 0.02)
        torch.nn.init.normal_(self.s, 0.0, 0.02)

    def forward(self, input, indices=None):
        """
        :param input: T x B x H (3D) or B x H (2D)
        :param indices: optional LongTensor assigning an ensemble member per batch entry
        :return: modulated linear output; averaged over ensemble members in eval mode
        """
        ensemble = self.r.size(0)
        bsz = input.size(1) if len(input.shape) == 3 else input.size(0)
        if indices is None:
            # Assign ensemble members round-robin over the batch.
            with torch.no_grad():
                indices = torch.arange(0, bsz, device=input.device, dtype=torch.long)
                indices = torch.remainder(indices, ensemble)
        # during training, we randomly select the ensemble_id into batch size
        if self.training:
            r = torch.index_select(self.r, 0, indices)
            s = torch.index_select(self.s, 0, indices)
            if len(input.shape) == 3:
                return BatchEnsembleLinearFunction.apply(input, self.weight, self.bias, r, s)
            if len(input.shape) == 2:
                # BUGFIX: previously referenced bare `weight`/`bias` (NameError).
                return torch.mul(F.linear(torch.mul(input, r), self.weight, self.bias), s)
        # during eval we have to repeat the dimensions ensemble times
        else:
            if len(input.shape) == 3:
                if indices is not None:
                    len_x, bsz, hin = input.size(0), input.size(1), input.size(2)
                    # Replicate the batch for every ensemble member, then fold the
                    # replica axis out so each member sees the whole batch.
                    # TODO: test at decoding time. batch_size=beam_size=1 should yield the same result
                    input = input.repeat(1, ensemble, 1)
                    input = input.view(len_x, ensemble, bsz, hin)
                    r = self.r.unsqueeze(1)  # ensemble x 1 x hin
                    s = self.s.unsqueeze(1)  # ensemble x 1 x hout
                    output = torch.mul(F.linear(torch.mul(input, r), self.weight, self.bias), s)
                    output = output.view(len_x, ensemble, bsz, output.size(-1))
                    output = torch.mean(output, dim=1)
                    return output
                else:
                    # NOTE(review): unreachable — indices is always set above when None.
                    r = torch.index_select(self.r, 0, indices)
                    s = torch.index_select(self.s, 0, indices)
                    if len(input.shape) == 3:
                        return BatchEnsembleLinearFunction.apply(input, self.weight, self.bias, r, s)
                    if len(input.shape) == 2:
                        # BUGFIX: previously referenced bare `weight`/`bias` (NameError).
                        return torch.mul(F.linear(torch.mul(input, r), self.weight, self.bias), s)
            else:
                bsz, hin = input.size(0), input.size(1)
                input = input.repeat(ensemble, 1)
                # BUGFIX: view() after transpose() requires contiguous memory,
                # otherwise PyTorch raises at runtime.
                r = self.r.repeat(bsz, 1).view(bsz, ensemble, self.r.size(-1)).\
                    transpose(0, 1).contiguous().view(-1, self.r.size(-1))
                s = self.s.repeat(bsz, 1).view(bsz, ensemble, self.s.size(-1)).\
                    transpose(0, 1).contiguous().view(-1, self.s.size(-1))
                # BUGFIX: previously referenced bare `weight`/`bias` (NameError).
                output = torch.mul(F.linear(torch.mul(input, r), self.weight, self.bias), s)
                output = output.view(ensemble, bsz, -1)
                output = torch.mean(output, dim=0)
                return output
class BEPositionWiseFeedForward(torch.nn.Module):
    """Position-wise feed-forward network with BatchEnsemble rank-1 modulation.

    Shared in/out projections are modulated per ensemble member by rank-1
    vectors (r_in, s_in) and (r_out, s_out). During training each batch entry
    is assigned one member round-robin; during evaluation the input is
    replicated for every member and the members' outputs are averaged.
    """

    def __init__(self, model_size, inner_size, dropout=0., variational=False, activation='relu', ensemble=1):
        super().__init__()
        self.variational = variational
        self.dropout = dropout
        self.activation = activation
        self.ensemble = ensemble
        self.in_proj_weight = torch.nn.Parameter(torch.Tensor(inner_size, model_size))
        self.out_proj_weight = torch.nn.Parameter(torch.Tensor(model_size, inner_size))
        self.in_proj_bias = torch.nn.Parameter(torch.Tensor(inner_size))
        self.out_proj_bias = torch.nn.Parameter(torch.Tensor(model_size))
        self.r_in = torch.nn.Parameter(torch.Tensor(ensemble, model_size))
        self.s_in = torch.nn.Parameter(torch.Tensor(ensemble, inner_size))
        self.r_out = torch.nn.Parameter(torch.Tensor(ensemble, inner_size))
        self.s_out = torch.nn.Parameter(torch.Tensor(ensemble, model_size))
        # BUGFIX: reset_parameters() was never called, leaving every parameter
        # as uninitialized (garbage) memory from torch.Tensor().
        self.reset_parameters()

    def forward(self, input, indices=None):
        """
        :param input: [T x B x model_size]
        :param indices: unused; the ensemble assignment is derived from batch position
        :return: [T x B x model_size]
        """
        len_x, bsz = input.size(0), input.size(1)
        ensemble = self.r_in.size(0)
        if self.training:
            # Round-robin ensemble assignment over the batch.
            with torch.no_grad():
                indices = torch.arange(0, bsz, device=input.device, dtype=torch.long)
                indices = torch.remainder(indices, ensemble)
            r_in = torch.index_select(self.r_in, 0, indices)
            s_in = torch.index_select(self.s_in, 0, indices)
            r_out = torch.index_select(self.r_out, 0, indices)
            s_out = torch.index_select(self.s_out, 0, indices)
            input = torch.mul(input, r_in)
            input = F.linear(input, self.in_proj_weight, self.in_proj_bias)
            input = torch.mul(input, s_in)
            input = F.relu(input)
            if self.variational:
                input = variational_dropout(input, p=self.dropout, training=self.training)
            else:
                input = F.dropout(input, p=self.dropout, training=self.training)
            input = torch.mul(input, r_out)
            input = F.linear(input, self.out_proj_weight, self.out_proj_bias)
            input = torch.mul(input, s_out)
            return input
        else:
            # Evaluation: replicate the input for every ensemble member, run the
            # modulated FFN for all members at once, and average the results.
            input = input.repeat(1, ensemble, 1).view(len_x, ensemble, bsz, input.size(-1))
            input = torch.mul(input, self.r_in.unsqueeze(1))
            input = F.linear(input, self.in_proj_weight, self.in_proj_bias)
            input = torch.mul(input, self.s_in.unsqueeze(1))
            input = F.relu(input)
            input = torch.mul(input, self.r_out.unsqueeze(1))
            input = F.linear(input, self.out_proj_weight, self.out_proj_bias)
            input = torch.mul(input, self.s_out.unsqueeze(1))
            input = torch.mean(input, dim=1)
            return input
        # NOTE: a previously unreachable `return hidden` (undefined name) was removed.

    def reset_parameters(self, init='normal'):
        torch.nn.init.xavier_normal_(self.in_proj_weight)
        torch.nn.init.xavier_normal_(self.out_proj_weight)
        torch.nn.init.constant_(self.in_proj_bias, 0.0)
        torch.nn.init.constant_(self.out_proj_bias, 0.0)
        torch.nn.init.normal_(self.r_in, 0.0, 0.02)
        torch.nn.init.normal_(self.s_in, 0.0, 0.02)
        torch.nn.init.normal_(self.r_out, 0.0, 0.02)
        torch.nn.init.normal_(self.s_out, 0.0, 0.02)
if __name__ == "__main__":
    # Smoke test: run torch.autograd.gradcheck over BatchEnsembleLinear.
    bsz = 16
    seq_len = 6
    input_size = 16
    output_size = 32
    ensemble = 72  # more members than batch entries; assignment wraps via remainder
    model = BatchEnsembleLinear(input_size, output_size, ensemble)
    input = torch.randn((seq_len, bsz, input_size), requires_grad=True)
    print(input)
    # gradcheck needs float64 precision; CUDA is required by the custom kernel path.
    model = model.double().cuda()
    input = input.double().cuda()
    print("Gradchecking ...")
    torch.autograd.gradcheck(model, input)
NMTGMinor | NMTGMinor-master/onmt/modules/bayes_by_backprop/utils.py | import torch
def flatten_list(tensors):
    """Flatten a sequence of tensors into a single 1-D tensor.

    :param tensors: iterable of tensors of arbitrary shapes
    :return: (flat, indices, shapes) where indices[i] is the (start, end) slice
             of tensor i inside flat and shapes[i] is its original shape.
    """
    pieces = []
    indices = []
    shapes = []
    offset = 0
    for tensor in tensors:
        shapes.append(tensor.shape)
        piece = torch.flatten(tensor)
        length = piece.shape[0]
        pieces.append(piece)
        indices.append((offset, offset + length))
        offset += length
    # cat already yields 1-D here; the extra view keeps the contract explicit
    return torch.cat(pieces).view(-1), indices, shapes
def unflatten(flat, indices, shapes):
    """Inverse of flatten_list: slice ``flat`` back into tensors of ``shapes``.

    :param flat: 1-D tensor produced by flatten_list
    :param indices: list of (start, end) slice bounds, one per tensor
    :param shapes: list of original shapes, aligned with ``indices``
    :return: tuple of reshaped views into ``flat``
    """
    return tuple(flat[start:end].view(*shape)
                 for (start, end), shape in zip(indices, shapes))
| 599 | 18.354839 | 46 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/bayes_by_backprop/feed_forward.py | import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from onmt.modules.dropout import variational_dropout
from .gaussian import Gaussian, ScaleMixtureGaussian
from .utils import flatten_list, unflatten
class PositionWiseFeedForward(nn.Module):
    """Position-wise feed-forward block with Bayes-by-Backprop weights.

    The weights and biases of both linear layers are flattened into a single
    (mu, rho) pair so that one diagonal Gaussian posterior covers every
    parameter of the block. During forward() the weights are sampled and the
    KL terms are accumulated into log_prior / log_variational_posterior.
    """

    def __init__(self, model_size, inner_size, dropout=0., variational=False, activation='relu'):
        super().__init__()
        self.model_size = model_size
        self.inner_size = inner_size
        self.dropout = dropout
        self.bias = True
        self.variational = variational
        self.activation = activation
        # two variables to record the (sum) of priors for all linear variables
        self.log_prior = 0
        self.log_variational_posterior = 0
        # Templates for the mean (mu) and pre-softplus std (rho) of each tensor.
        in_proj_weight_mu = torch.Tensor(inner_size, model_size)
        in_proj_weight_rho = torch.Tensor(inner_size, model_size)
        out_proj_weight_mu = torch.Tensor(model_size, inner_size)
        out_proj_weight_rho = torch.Tensor(model_size, inner_size)
        in_proj_bias_mu = torch.Tensor(inner_size)
        in_proj_bias_rho = torch.Tensor(inner_size)
        out_proj_bias_mu = torch.Tensor(model_size)
        out_proj_bias_rho = torch.Tensor(model_size)
        # Flatten everything into one vector; indices/shapes let forward()
        # unflatten the sampled vector back into the individual tensors.
        # The list order here must match the unflatten order in forward().
        mu, self.indices, self.shapes = \
            flatten_list([in_proj_weight_mu, out_proj_weight_mu, in_proj_bias_mu, out_proj_bias_mu])
        rho, _, _ = flatten_list([in_proj_weight_rho, out_proj_weight_rho, in_proj_bias_rho, out_proj_bias_rho])
        self.mu = Parameter(mu)
        self.rho = Parameter(rho)
        self.weight = Gaussian(self.mu, self.rho)
        self.weight_prior = ScaleMixtureGaussian()
        self.reset_parameters()
        try:
            from apex.mlp.mlp import mlp_function
            self.optimized = 2
            self.fast_mlp_func = mlp_function
        except ModuleNotFoundError as e:
            self.optimized = 2
        # NOTE(review): both branches set optimized = 2, so the apex fast path
        # below is never taken — confirm whether the try-branch should set 1.

    def reset_parameters(self):
        # Xavier-style std for the means; rho strongly negative => tiny initial sigma.
        std_ = math.sqrt(2.0 / (self.model_size + self.inner_size))
        nn.init.normal_(self.mu, 0.0, std_)
        nn.init.normal_(self.rho, -5, 0.1)

    def forward(self, input, sample=False, calculate_log_probs=False):
        """
        :param input: presumably [T x B x model_size] — TODO confirm with callers
        :param sample: draw stochastic weights even outside training
        :param calculate_log_probs: force computing the KL terms
        """
        calculate_log_probs = calculate_log_probs or self.training
        sample = sample or self.training
        # (MCMC)
        # Sample the weights from the variational posterior distribution q(w)
        sampled_weights, log_variational_posterior = self.weight.sample(sample, calculate_log_probs)
        in_proj_weight, out_proj_weight, in_proj_bias, out_proj_bias = \
            unflatten(sampled_weights, self.indices, self.shapes)
        if self.optimized == 2 or not input.is_cuda:
            # Plain PyTorch path: linear -> relu -> dropout -> linear.
            hidden = F.linear(input, in_proj_weight, in_proj_bias)
            hidden = F.relu(hidden, inplace=True)
            if self.variational:
                hidden = variational_dropout(hidden, p=self.dropout, training=self.training)
            else:
                hidden = F.dropout(hidden, p=self.dropout, training=self.training)
            hidden = F.linear(hidden, out_proj_weight, out_proj_bias)
        else:
            # Apex MLP does not support dropout so instead we use dropconnect
            # Theoretically they should be the same ^^
            weights = [in_proj_weight,
                       out_proj_weight]
            biases = [in_proj_bias,
                      out_proj_bias]
            seq_len, bsz, hidden_size = input.size(0), input.size(1), input.size(2)
            # True = bias, 1 = relu
            hidden = self.fast_mlp_func(True, 1, input.view(seq_len*bsz, -1), *weights, *biases)
            hidden = hidden.view(seq_len, bsz, hidden_size)
        if calculate_log_probs:
            # KL Divergence between prior and (variational) posterior
            self.log_variational_posterior = log_variational_posterior
            self.log_prior = self.weight_prior.log_prob(sampled_weights)
        return hidden
| 4,046 | 37.913462 | 112 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/bayes_by_backprop/gaussian.py | import torch
import torch.nn.functional as F
import numpy
import math
import torch.nn as nn
# Precomputed log(sqrt(2*pi)) constant of the Gaussian log-density.
log_sqrt_2pi = math.log(math.sqrt(2 * math.pi))
class Gaussian(object):
    """Diagonal Gaussian variational posterior parameterized by (mu, rho).

    sigma = softplus(rho) keeps the std strictly positive. Used for the
    reparameterized weight sampling in Bayes-by-Backprop.
    """

    def __init__(self, mu, rho):
        super().__init__()
        self.mu = mu
        self.rho = rho
        self.normal = torch.distributions.Normal(0, 1)

    @property
    def sigma(self):
        # sigma = log(exp(rho) + 1) = softplus
        return F.softplus(self.rho, beta=1)  # this should be a numerically better option
        # return torch.log1p(torch.exp(self.rho))

    def sample(self, stochastic=False, return_log_prob=False):
        """Sample weights w = mu + sigma * eps (eps ~ N(0, 1)).

        :param stochastic: if False, return the mean deterministically
        :param return_log_prob: also return log q(w) summed over all elements
        :return: (w, log_prob) — log_prob is 0 when not requested
        """
        sigma = self.sigma
        if stochastic:
            # Reparameterization trick.
            # BUGFIX: was torch.rand_like (uniform noise); Gaussian sampling
            # requires standard-normal noise from randn_like.
            epsilon = torch.randn_like(self.mu)
            var = sigma * epsilon
            w = self.mu + var
        else:
            w = self.mu
            var = 0
        if not return_log_prob:
            return w, 0
        sigma = sigma.float()
        # log N(w; mu, sigma) = -log(sqrt(2*pi)) - log(sigma) - (w-mu)^2 / (2*sigma^2)
        log_prob = (- log_sqrt_2pi
                    - torch.log(sigma)
                    - (var ** 2) / (2 * sigma ** 2)).sum()
        return w, log_prob

    def log_prob(self, input):
        """Summed Gaussian log-density of ``input`` under N(mu, sigma)."""
        sigma = self.sigma.float()
        input = input.float()
        # BUGFIX: the constant term must be negative (-log(sqrt(2*pi))),
        # matching the density used in sample() above.
        return (- math.log(math.sqrt(2 * math.pi))
                - torch.log(sigma)
                - ((input - self.mu) ** 2) / (2 * sigma ** 2)).sum()
class ScaleMixtureGaussian(object):
    """Two-component scale-mixture-of-Gaussians prior (Blundell et al., 2015).

    p(w) = pi * N(0, sigma1) + (1 - pi) * N(0, sigma2); defaults come from
    onmt.constants and live on the GPU.
    """

    def __init__(self, pi=None, sigma1=None, sigma2=None):
        super().__init__()
        from onmt.constants import neg_log_sigma1, neg_log_sigma2, prior_pi
        # Default stds are exp(-neg_log_sigma*); defaults are CUDA tensors.
        if sigma1 is None:
            sigma1 = torch.cuda.FloatTensor([math.exp(-neg_log_sigma1)])
        if sigma2 is None:
            sigma2 = torch.cuda.FloatTensor([math.exp(-neg_log_sigma2)])
        self.pi = prior_pi if pi is None else pi
        self.sigma1 = sigma1
        self.sigma2 = sigma2
        self.gaussian1 = torch.distributions.Normal(0, sigma1)
        self.gaussian2 = torch.distributions.Normal(0, sigma2)

    def log_prob(self, input):
        """Summed log-density of ``input`` under the mixture prior."""
        density1 = torch.exp(self.gaussian1.log_prob(input))
        density2 = torch.exp(self.gaussian2.log_prob(input))
        mixture = self.pi * density1 + (1 - self.pi) * density2
        return torch.log(mixture).sum()
| 2,619 | 33.473684 | 112 | py |
class Generator(nn.Module):
    """Projects decoder hidden states to vocabulary-sized logits."""

    def __init__(self, hidden_size, output_size, fix_norm=False):
        super(Generator, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.linear = nn.Linear(hidden_size, output_size)
        self.fix_norm = fix_norm
        # Uniform init in [-1/sqrt(fan_in), 1/sqrt(fan_in)] with a zero bias.
        bound = 1. / math.sqrt(self.linear.weight.size(1))
        torch.nn.init.uniform_(self.linear.weight, -bound, bound)
        self.linear.bias.data.zero_()

    def forward(self, output_dicts):
        """
        :param output_dicts: dictionary with the decoder outputs
            ('hidden' states and 'target_mask')
        :return: logits (pre-softmax scores)
        """
        hidden = output_dicts['hidden']
        target_mask = output_dicts['target_mask']
        if self.fix_norm:
            # Project with length-normalized output embeddings.
            unit_weights = F.normalize(self.linear.weight, dim=-1)
            logits = F.linear(hidden, unit_weights, self.linear.bias)
        else:
            logits = self.linear(hidden).float()
        # softmax / log-softmax is applied later in the loss function
        return logits
| 1,214 | 31.837838 | 77 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/bayes_by_backprop/relative_self_attention.py | import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from .gaussian import Gaussian, ScaleMixtureGaussian
from .utils import flatten_list, unflatten
from ..optimized.relative_self_attention_func import relative_self_attn_func
# from .fast_self_multihead_attn_func import fast_self_attn_func
# from .fast_self_multihead_attn_norm_add_func import fast_self_attn_norm_add_func
# from apex.normalization.fused_layer_norm import FusedLayerNorm
class RelativeSelfMultiheadAttn(nn.Module):
    """Relative-position self-attention with Bayes-by-Backprop weights.

    All projections (fused QKV, output, position) plus the content/position
    biases (r_w_bias, r_r_bias) are flattened into one (mu, rho) pair so a
    single diagonal Gaussian posterior covers the whole layer. The KL terms
    are accumulated into log_prior / log_variational_posterior in forward().
    """

    def __init__(self, model_size, num_heads, dropout=0.):
        super().__init__()
        self.model_size = model_size
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = model_size // num_heads
        assert self.head_dim * num_heads == self.model_size, "model_size must be divisible by num_heads"
        self.bias = True
        # KL accumulators, filled in during forward().
        self.log_prior = 0
        self.log_variational_posterior = 0
        # Templates for the mean (mu) and pre-softplus std (rho) of each tensor.
        in_proj_weight_mu = torch.Tensor(3 * model_size, model_size)
        in_proj_weight_rho = torch.Tensor(3 * model_size, model_size)
        out_proj_weight_mu = torch.Tensor(model_size, model_size)
        out_proj_weight_rho = torch.Tensor(model_size, model_size)
        pos_proj_weight_mu = torch.Tensor(model_size, model_size)
        pos_proj_weight_rho = torch.Tensor(model_size, model_size)
        in_proj_bias_mu = torch.Tensor(3*model_size)
        in_proj_bias_rho = torch.Tensor(3*model_size)
        out_proj_bias_mu = torch.Tensor(model_size)
        out_proj_bias_rho = torch.Tensor(model_size)
        pos_proj_bias_mu = torch.Tensor(model_size)
        pos_proj_bias_rho = torch.Tensor(model_size)
        # Transformer-XL content (r_w) and position (r_r) attention biases.
        r_w_bias_mu = torch.Tensor(self.num_heads, self.head_dim)
        r_w_bias_rho = torch.Tensor(self.num_heads, self.head_dim)
        r_r_bias_mu = torch.Tensor(self.num_heads, self.head_dim)
        r_r_bias_rho = torch.Tensor(self.num_heads, self.head_dim)
        # Flatten everything; this list order must match the unflatten order in forward().
        mu, self.indices, self.shapes = flatten_list([in_proj_weight_mu, out_proj_weight_mu, pos_proj_weight_mu,
                                                      in_proj_bias_mu, out_proj_bias_mu, pos_proj_bias_mu,
                                                      r_w_bias_mu, r_r_bias_mu])
        rho, _, _ = flatten_list([in_proj_weight_rho, out_proj_weight_rho, pos_proj_weight_rho,
                                  in_proj_bias_rho, out_proj_bias_rho, pos_proj_bias_rho,
                                  r_w_bias_rho, r_r_bias_rho])
        self.mu = Parameter(mu)
        self.rho = Parameter(rho)
        self.weight = Gaussian(self.mu, self.rho)
        self.weight_prior = ScaleMixtureGaussian()
        self.reset_parameters()
        self.attn_func = relative_self_attn_func

    def reset_parameters(self):
        # nn.init.xavier_uniform_(self.in_proj_weight, gain=math.sqrt(2))
        # nn.init.xavier_uniform_(self.out_proj_weight)
        # Xavier-style std for the means; rho strongly negative => tiny initial sigma.
        std_ = math.sqrt(2.0 / (self.model_size + self.model_size))
        nn.init.normal_(self.mu, 0.0, std_)
        nn.init.normal_(self.rho, -5, 0.1)
        # nn.init.uniform_(self.rho, -6, -5)

    def forward(self, input, pos, key_padding_mask=None, attn_mask=None, mems=None,
                incremental=False, incremental_cache=None, sample=False, calculate_log_probs=False):
        """
        :param input: presumably [T x B x model_size] — TODO confirm against relative_self_attn_func
        :param pos: relative position encodings consumed by the attention kernel
        :param key_padding_mask: mask over padded key positions (exclusive with attn_mask)
        :param attn_mask: time/causal attention mask
        :param mems: unused here; kept for interface compatibility
        :param incremental: decode step-by-step using incremental_cache
        :param sample: draw stochastic weights even outside training
        :param calculate_log_probs: force computing the KL terms
        :return: (outputs, coverage) from the attention kernel
        """
        calculate_log_probs = calculate_log_probs or self.training
        sample = sample or self.training
        # (MCMC)
        # Sample the weights from the variational posterior distribution q(w)
        sampled_weights, log_variational_posterior = self.weight.sample(sample, calculate_log_probs)
        in_proj_weight, out_proj_weight, pos_proj_weight, \
            in_proj_bias, out_proj_bias, pos_proj_bias, \
            r_w_bias, r_r_bias = unflatten(sampled_weights, self.indices, self.shapes)
        # Normalize whichever mask was supplied into the 2-D layout the kernel expects.
        if key_padding_mask is not None:
            assert (attn_mask is None), "ERROR attn_mask and key_padding_mask should not be both defined!"
            mask = key_padding_mask
            if len(mask.shape) == 3:
                mask = mask.squeeze(0).transpose(0, 1)
        elif attn_mask is not None:
            mask = attn_mask
            if len(mask.shape) == 3:
                mask = mask.squeeze(-1)
        else:
            mask = None
        is_training = self.training
        outputs, coverage = self.attn_func(input, pos, attn_mask is not None, is_training, self.num_heads,
                                           in_proj_weight, out_proj_weight, pos_proj_weight,
                                           in_proj_bias, out_proj_bias, pos_proj_bias,
                                           r_w_bias, r_r_bias,
                                           mask, self.dropout,
                                           incremental, incremental_cache, False, False)
        # last False is double precision
        # KL Divergence between prior and (variational) posterior
        if calculate_log_probs:
            self.log_variational_posterior = log_variational_posterior
            self.log_prior = self.weight_prior.log_prob(sampled_weights)
        return outputs, coverage
| 5,261 | 42.131148 | 112 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/bayes_by_backprop/encdec_attention.py | import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from ..optimized.encdec_attention_func import encdec_attn_func
from .gaussian import Gaussian, ScaleMixtureGaussian
from .utils import flatten_list, unflatten
class EncdecMultiheadAttn(nn.Module):
    """Multi-headed encoder-decoder attention with Bayes-by-Backprop weights.

    The Q, fused-KV, and output projection weights are flattened into one
    (mu, rho) pair so a single diagonal Gaussian posterior covers the layer.
    KL terms are accumulated into log_prior / log_variational_posterior in
    forward(). See "Attention Is All You Need" for the attention mechanism.
    """

    def __init__(self, num_heads, embed_dim, attn_drop=0.):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = attn_drop
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        self.bias = False
        self.scaling = self.head_dim ** -0.5  # this value is hardcoded in the "fast" implementation
        # two variables to record the (sum) of priors for all linear variables
        self.log_prior = 0
        self.log_variational_posterior = 0
        # Q linear mapping weight
        in_proj_weight_q_mu = torch.Tensor(embed_dim, embed_dim)
        in_proj_weight_q_rho = torch.Tensor(embed_dim, embed_dim)
        # KV Linear mapping weight (keys and values projected jointly)
        in_proj_weight_kv_mu = torch.Tensor(2 * embed_dim, embed_dim)
        in_proj_weight_kv_rho = torch.Tensor(2 * embed_dim, embed_dim)
        # Output linear mapping weight
        out_proj_weight_mu = torch.Tensor(embed_dim, embed_dim)
        out_proj_weight_rho = torch.Tensor(embed_dim, embed_dim)
        # Flatten everything; the list order must match the unflatten order in forward().
        self.mu, self.indices, self.shapes = \
            flatten_list([in_proj_weight_q_mu, in_proj_weight_kv_mu, out_proj_weight_mu])
        self.mu = Parameter(self.mu)
        self.rho, _, _ = flatten_list([in_proj_weight_q_rho, in_proj_weight_kv_rho, out_proj_weight_rho])
        self.rho = Parameter(self.rho)
        self.weight = Gaussian(self.mu, self.rho)
        self.weight_prior = ScaleMixtureGaussian()
        self.attn_func = encdec_attn_func
        self.reset_parameters()
        try:
            # the fast one requires apex and does not work with incremental so careful
            from apex.contrib.multihead_attn.fast_encdec_multihead_attn_func import fast_encdec_attn_func
            self.attn_func_fast = fast_encdec_attn_func
            self.optimized = 1
        except ModuleNotFoundError as e:
            self.optimized = 2
            self.attn_func_fast = None

    def reset_parameters(self):
        # We initialize mu with a Gaussian around 0
        # (just as we would initialize standard weights of a neural network)
        std_ = math.sqrt(2.0 / (self.embed_dim + self.embed_dim))
        nn.init.normal_(self.mu, 0.0, std_)
        # It is important to initialize rho (and hence sigma) to a small value,
        # otherwise learning might not work properly.
        nn.init.normal_(self.rho, -7, 0.1)

    def forward(self, query, key, value, attn_mask=None, incremental=False, incremental_cache=None,
                sample=False, calculate_log_probs=False):
        """
        :param query: decoder states; presumably [T_q x B x embed_dim] — TODO confirm
        :param key: encoder states (must be the same tensor as value)
        :param value: must be identical to key (asserted below)
        :param attn_mask: optional attention mask consumed by the kernel
        :param incremental: decode step-by-step using incremental_cache
        :param sample: draw stochastic weights even outside training
        :param calculate_log_probs: force computing the KL terms
        :return: (outputs, coverage)
        """
        calculate_log_probs = calculate_log_probs or self.training
        sample = sample or self.training
        assert value is key, "ERROR: Keys and values must be the same."
        is_training = self.training
        time_masking = False
        len_key = key.size(0)
        # (MCMC)
        # Sample the weights from the variational posterior distribution q(w)
        sampled_weights, log_variational_posterior = \
            self.weight.sample(stochastic=sample, return_log_prob=calculate_log_probs)
        in_proj_weight_q, in_proj_weight_kv, out_proj_weight = unflatten(sampled_weights, self.indices, self.shapes)
        # Perform forward with the sampled weights
        if self.optimized == 1 and (self.training and not incremental) and len_key <= 1024 and query.is_cuda:
            # Fused apex kernel: training, non-incremental, on GPU only.
            if attn_mask is not None:
                if attn_mask.dim() == 3:
                    attn_mask = attn_mask.squeeze(1)
                attn_mask = attn_mask.byte()
            outputs = self.attn_func_fast(time_masking, is_training, self.num_heads,
                                          query.type_as(in_proj_weight_q), key.type_as(in_proj_weight_q),
                                          in_proj_weight_q, in_proj_weight_kv, out_proj_weight,
                                          attn_mask, self.dropout)
            coverage = None
        # during evaluation we use the python binding which is safer ....
        else:
            outputs, coverage, = self.attn_func(time_masking, is_training,
                                                self.num_heads, query, key,
                                                in_proj_weight_q, in_proj_weight_kv,
                                                out_proj_weight, attn_mask, self.dropout,
                                                incremental, incremental_cache)
        if calculate_log_probs:
            # KL Divergence between prior and (variational) posterior
            self.log_variational_posterior = log_variational_posterior
            self.log_prior = self.weight_prior.log_prob(sampled_weights)
        return outputs, coverage
| 5,175 | 42.133333 | 117 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/kernels/kernel.py |
"""Construct wide convolution kernels."""
from typing import Optional, Mapping, Tuple, Union
from collections import defaultdict
import math
import torch
import torch.nn as nn
| 180 | 15.454545 | 50 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/mlp/test_mlp_gelu.py | from copy import copy, deepcopy
import math
import torch
from torch import nn
import torch.nn.functional as F
import unittest
from time import time
import numpy as np
import random
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from ..optimized.compat import custom_fwd, custom_bwd
try:
import fused_mlp_relu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_relu = None
try:
import fused_mlp_agelu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_agelu = None
try:
import fused_mlp_gelu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_gelu = None
try:
import mlp_gelu_blaslt
except (ModuleNotFoundError, ImportError) as e:
mlp_gelu_blaslt = None
# Allow TensorFloat-32 matmuls on Ampere+ GPUs (faster, slightly reduced precision).
torch.backends.cuda.matmul.allow_tf32 = True
# The flag below controls whether to allow TF32 on cuDNN. This flag defaults to True.
torch.backends.cudnn.allow_tf32 = True
#
# class MlpReluFunction(torch.autograd.Function):
# @staticmethod
# @custom_fwd(cast_inputs=torch.float16)
# def forward(ctx, activation, *args):
# output = fused_mlp.forward(args)
# ctx.save_for_backward(*args)
# ctx.outputs = output
# return output[0]
#
# @staticmethod
# @custom_bwd
# def backward(ctx, grad_o):
# grads = fused_mlp.backward(grad_o, ctx.outputs, ctx.saved_tensors)
# del ctx.outputs
# return (None, *grads)
#
#
class MlpReluFunction(torch.autograd.Function):
    """Autograd binding for the fused ReLU-MLP CUDA extension."""

    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx, p, *args):
        # p is the dropout probability; args hold input, weights and biases.
        results = fused_mlp_relu.forward(p, args)
        ctx.save_for_backward(*args)
        ctx.outputs = results
        ctx.p = p
        # The kernel returns the dropout mask as its last output.
        return results[0], results[-1]

    @staticmethod
    @custom_bwd
    def backward(ctx, *grad_o):
        grads = fused_mlp_relu.backward(ctx.p, grad_o[0], ctx.outputs, ctx.saved_tensors)
        del ctx.outputs
        # None for the non-tensor dropout-probability argument.
        return (None, *grads)
class MlpSiluFunction(torch.autograd.Function):
    """Autograd binding for a fused SiLU-MLP CUDA extension.

    NOTE(review): ``fused_mlp_silu`` is never imported in this module (only
    fused_mlp_relu / fused_mlp_agelu / fused_mlp_gelu / mlp_gelu_blaslt are),
    so calling this function raises NameError — confirm the intended import.
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx, p, *args):
        # p is the dropout probability; args hold input, weights and biases.
        outputs = fused_mlp_silu.forward(p, args)
        ctx.save_for_backward(*args)
        ctx.outputs = outputs
        # The kernel returns the dropout mask as its last output.
        dropout_mask = outputs[-1]
        ctx.p = p
        return outputs[0], dropout_mask

    @staticmethod
    @custom_bwd
    def backward(ctx, *grad_o):
        p = ctx.p
        grads = fused_mlp_silu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
        del ctx.outputs
        # None for the non-tensor dropout-probability argument.
        return (None, *grads)
class MlpGeLUFunction(torch.autograd.Function):
    """Autograd binding for the fused GELU-MLP CUDA extension."""

    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx, p, *args):
        # p is the dropout probability; args hold input, weights and biases.
        results = fused_mlp_gelu.forward(p, args)
        ctx.save_for_backward(*args)
        ctx.outputs = results
        ctx.p = p
        # Skip weight gradients in backward when the first weight is frozen.
        ctx.weight_requires_grad = args[1].requires_grad
        # The kernel returns the dropout mask as its last output.
        return results[0], results[-1]

    @staticmethod
    @custom_bwd
    def backward(ctx, *grad_o):
        if ctx.weight_requires_grad:
            grads = fused_mlp_gelu.backward(ctx.p, grad_o[0], ctx.outputs, ctx.saved_tensors)
        else:
            grads = fused_mlp_gelu.backward_input_only(ctx.p, grad_o[0], ctx.outputs, ctx.saved_tensors)
            # Pad with None for every weight/bias gradient that was not computed.
            for _ in range(len(ctx.saved_tensors) - 1):
                grads.append(None)
        del ctx.outputs
        del ctx.p
        # None for the non-tensor dropout-probability argument.
        return (None, *grads)
class MlpGeLUFunctionBLASLT(torch.autograd.Function):
    """GELU MLP backed by cuBLASLt; can recompute activations in backward."""

    @staticmethod
    @custom_fwd
    def forward(ctx, p, recompute, *args):
        # p is the dropout probability; args hold input, weights and biases.
        results = mlp_gelu_blaslt.forward(p, args)
        ctx.save_for_backward(*args)
        ctx.recompute = recompute
        # With recomputation only the output and dropout mask are retained,
        # trading backward-time compute for activation memory.
        ctx.outputs = (results[0], results[-1]) if recompute else results
        ctx.p = p
        return results[0], results[-1]

    @staticmethod
    @custom_bwd
    def backward(ctx, *grad_o):
        grads = mlp_gelu_blaslt.backward(ctx.p, ctx.recompute, grad_o[0],
                                         ctx.outputs, ctx.saved_tensors)
        del ctx.outputs
        # Two Nones for the non-tensor (p, recompute) arguments.
        return (None, None, *grads)
# Expose callable entry points only when the corresponding extension loaded.
mlp_gelu_function = MlpGeLUFunction.apply if fused_mlp_gelu else None
mlp_gelu_function_blaslt = MlpGeLUFunctionBLASLT.apply if mlp_gelu_blaslt else None
if __name__ == '__main__':
class MLP(torch.nn.Module):
    """Launch MLP in C++ (fused CUDA kernels) with a PyTorch reference path.

    Args:
        mlp_sizes (list of int): layer widths; e.g. [1024, 1024, 1024] creates
            two linear layers of shape 1024x1024
        activation (str): 'relu', 'sigmoid' or 'gelu' (encoded as an int for
            the extension)
        dropout (float): dropout probability applied between hidden layers
    """

    def __init__(self, mlp_sizes, activation='gelu', dropout=0.5):
        super(MLP, self).__init__()
        self.num_layers = len(mlp_sizes) - 1
        self.mlp_sizes = copy(mlp_sizes)
        self.dropout = dropout
        # Activation is encoded as an integer for the C++/CUDA extension.
        if activation == 'relu':
            self.activation = 1
        elif activation == 'sigmoid':
            self.activation = 2
        elif activation == 'gelu':
            self.activation = 3
        else:
            raise TypeError("activation must be relu or none.")
        self.weights = []
        self.biases = []
        # Register each layer's weight/bias as attributes so nn.Module tracks them.
        for i in range(self.num_layers):
            w = torch.nn.Parameter(torch.empty(mlp_sizes[i + 1], mlp_sizes[i]))
            self.weights.append(w)
            name = 'weight_{}'.format(i)
            setattr(self, name, w)
            b = torch.nn.Parameter(torch.empty(mlp_sizes[i + 1]))
            self.biases.append(b)
            name = 'bias_{}'.format(i)
            setattr(self, name, b)
        self.reset_parameters()

    def reset_parameters(self):
        # Xavier-style normal init for weights; biases are zeroed (std 0.0).
        for weight in self.weights:
            dimsum = weight.size(0) + weight.size(1)
            std = math.sqrt(2. / float(dimsum))
            nn.init.normal_(weight, 0., std)
        for bias in self.biases:
            # NOTE(review): this std is computed but unused — the init below uses 0.0.
            std = math.sqrt(1. / float(bias.size(0)))
            nn.init.normal_(bias, 0., 0.0)

    def forward(self, input, mask=None, ref=False, fastest=False, recompute=False):
        """Dispatch to the cuBLASLt kernel, the fused kernel, or the reference path.

        :param mask: flat dropout mask consumed only by the reference path
        :param ref: run the pure-PyTorch reference implementation
        :param fastest: run the cuBLASLt fused kernel
        :param recompute: recompute activations in backward (cuBLASLt path only)
        """
        if fastest and not ref:
            return mlp_gelu_function_blaslt(self.dropout, recompute, input, *self.weights, *self.biases)
        if ref:
            return self.forward_ref(input, mask)
        return mlp_gelu_function(self.dropout, input, *self.weights, *self.biases)

    def forward_ref(self, input, mask):
        # Reference implementation replaying the dropout mask produced by the
        # fused kernel, so both paths can be compared exactly.
        i = 0
        output = input
        for l in range(self.num_layers):
            output = F.linear(output, self.weights[l], self.biases[l])
            dropout_mask = mask[i:i + output.numel()]
            pinv = 1 / (1 - self.dropout)  # inverted-dropout rescale factor
            if l < self.num_layers - 1:
                # GELU + (optional) masked dropout between hidden layers only.
                if self.dropout > 0:
                    output = F.gelu(output) * dropout_mask.view_as(output) * pinv
                else:
                    output = F.gelu(output)
            i += output.numel()
        return output

    def extra_repr(self):
        # TODO add dropout probability
        s = F"MLP sizes: {self.mlp_sizes}, activation={self.activation}, dropout={self.dropout}"
        return s
# Benchmark/test configuration for the unit tests below.
seq_len = 1
batch_size = 1024
mlp_sizes = [1024, 4096, 1024]  # one hidden layer: 1024 -> 4096 -> 1024
num_iters = 512
    class TestMLP(unittest.TestCase):
        """CUDA correctness and timing checks for the fused/BLASLT GELU MLP."""

        def test_creation(self):
            """Module construction alone must not raise."""
            MLP(mlp_sizes)

        def test_numeric(self):
            """Fused forward/backward must match the PyTorch reference, which
            reuses the dropout mask produced by the fused path."""
            print("Test numeric 3D ....")
            for dropout in [0.0, 0.2, 0.5, 0.7]:
                mlp = MLP(mlp_sizes, dropout=dropout).cuda()
                print(mlp)
                ref_mlp = deepcopy(mlp)
                for _ in range(1):
                    # Random batch size, kept a multiple of 8 for tensor cores.
                    bsz = random.randint(8, batch_size // 8) * 8
                    test_input = torch.empty(seq_len, bsz, mlp_sizes[0], device="cuda").uniform_(-1.,
                                                                                                 1.).requires_grad_()
                    ref_input = test_input.clone().detach().requires_grad_()
                    mlp_out, dropout_mask = mlp(test_input, fastest=True, recompute=True)
                    ref_out = ref_mlp.forward(ref_input, dropout_mask, ref=True)
                    print(dropout_mask.sum() / dropout_mask.numel(), dropout_mask.numel())
                    np.testing.assert_allclose(
                        mlp_out.detach().cpu().numpy(),
                        ref_out.detach().cpu().numpy(),
                        atol=1e-3, rtol=1e-3)
                    # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
                    mlp_out.mean().mul(10.).backward()
                    ref_out.mean().mul(10.).backward()
                    np.testing.assert_allclose(
                        test_input.grad.detach().cpu().numpy(),
                        ref_input.grad.detach().cpu().numpy(),
                        atol=1e-7, rtol=1e-5)
                    for i in range(len(mlp.weights)):
                        np.testing.assert_allclose(
                            mlp.weights[i].grad.detach().cpu().numpy(),
                            ref_mlp.weights[i].grad.detach().cpu().numpy(),
                            atol=1e-5, rtol=1e-5)
                        np.testing.assert_allclose(
                            mlp.biases[i].grad.detach().cpu().numpy(),
                            ref_mlp.biases[i].grad.detach().cpu().numpy(),
                            atol=1e-5, rtol=1e-5)
#
# def test_with_bias(self):
# mlp = MLP(mlp_sizes, activation=use_activation).cuda()
#
# ref_mlp = deepcopy(mlp)
#
# test_input = torch.empty(seq_len, batch_size, mlp_sizes[0], device="cuda").uniform_(-1.,
# 1.).requires_grad_()
# ref_input = test_input.clone().detach().requires_grad_()
# mlp_out, dropout_mask = mlp(test_input, fastest=True)
# ref_out = ref_mlp(ref_input, dropout_mask, ref=True)
# np.testing.assert_allclose(
# mlp_out.detach().cpu().numpy(),
# ref_out.detach().cpu().numpy(),
# atol=1e-4, rtol=1e-4)
#
# # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
# mlp_out.mean().mul(10.).backward()
# ref_out.mean().mul(10.).backward()
# np.testing.assert_allclose(
# test_input.grad.detach().cpu().numpy(),
# ref_input.grad.detach().cpu().numpy(),
# atol=1e-4, rtol=1e-4)
#
# for l in range(mlp.num_layers):
# np.testing.assert_allclose(
# mlp.weights[l].grad.detach().cpu().numpy(),
# ref_mlp.weights[l].grad.detach().cpu().numpy(),
# atol=1e-4, rtol=1e-4)
# np.testing.assert_allclose(
# mlp.biases[l].grad.detach().cpu().numpy(),
# ref_mlp.biases[l].grad.detach().cpu().numpy(),
# atol=1e-4, rtol=1e-4)
#
# def test_no_weight_grad(self):
#
# print("Test backward no weight grad ...")
# for dropout in [0.0, 0.35]:
# mlp = MLP(mlp_sizes, activation="gelu", dropout=dropout).cuda()
# print(mlp)
# for p in mlp.parameters():
# p.requires_grad = False
#
# ref_mlp = deepcopy(mlp)
#
# test_input = torch.empty(seq_len, batch_size, mlp_sizes[0], device="cuda").uniform_(-1.,
# 1.).requires_grad_()
# ref_input = test_input.clone().detach().requires_grad_()
# mlp_out, dropout_mask = mlp(test_input)
# ref_out = ref_mlp(ref_input, dropout_mask, ref=True)
# np.testing.assert_allclose(
# mlp_out.detach().cpu().numpy(),
# ref_out.detach().cpu().numpy(),
# atol=1e-7, rtol=1e-5)
#
# # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
# mlp_out.mean().mul(10.).backward()
# ref_out.mean().mul(10.).backward()
# np.testing.assert_allclose(
# test_input.grad.detach().cpu().numpy(),
# ref_input.grad.detach().cpu().numpy(),
# atol=1e-5, rtol=1e-4)
#
# # for l in range(mlp.num_layers):
# # np.testing.assert_allclose(
# # mlp.weights[l].grad.detach().cpu().numpy(),
# # ref_mlp.weights[l].grad.detach().cpu().numpy(),
# # atol=1e-7, rtol=1e-5)
# # np.testing.assert_allclose(
# # mlp.biases[l].grad.detach().cpu().numpy(),
# # ref_mlp.biases[l].grad.detach().cpu().numpy(),
# # atol=1e-7, rtol=1e-5)
#
# def test_no_grad(self):
# mlp = MLP(mlp_sizes).cuda()
# ref_mlp = deepcopy(mlp)
#
# test_input = torch.empty(seq_len, batch_size, mlp_sizes[0], device="cuda").uniform_(-1., 1.)
# ref_input = test_input.clone().detach()
# mlp_out, dropout_mask = mlp(test_input)
#
# ref_out = ref_mlp(ref_input, dropout_mask, ref=True)
# np.testing.assert_allclose(
# mlp_out.detach().cpu().numpy(),
# ref_out.detach().cpu().numpy(),
# atol=1e-7, rtol=1e-5)
        def test_performance_half(self):
            """Wall-clock comparison (fp16, forward+backward): eager PyTorch,
            torch.compile, fused C++, BLASLT, and BLASLT-with-recompute."""
            print("Testing performance ...")
            for dropout in [0.0, 0.5]:
                mlp = MLP(mlp_sizes, dropout=dropout).cuda().half()
                print(mlp)
                mlp_layers = []
                for i in range(mlp.num_layers):
                    linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1])
                    # Share parameters so both models compute the same function.
                    mlp.weights[i].data.copy_(linear.weight)
                    mlp.biases[i].data.copy_(linear.bias)
                    mlp_layers.append(linear)
                    if i < mlp.num_layers - 1:
                        mlp_layers.append(torch.nn.GELU())
                        mlp_layers.append(nn.Dropout(dropout))
                ref_mlp = nn.Sequential(*mlp_layers)
                ref_mlp_compiled = nn.Sequential(*mlp_layers)
                # ref_mlp = torch.compile(ref_mlp)
                ref_mlp = ref_mlp.cuda().half()
                ref_mlp_compiled = torch.compile(ref_mlp_compiled)
                ref_mlp_compiled = ref_mlp_compiled.cuda().half()
                test_input = torch.empty(
                    seq_len* batch_size, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
                ref_input = torch.empty(
                    seq_len* batch_size, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
                # Warm up GPU
                for _ in range(num_iters):
                    ref_out = ref_mlp(ref_input)
                    ref_loss = ref_out.mean()
                    ref_mlp.zero_grad()
                    ref_loss.backward()
                    mlp_out, _ = mlp(test_input)
                    test_loss = mlp_out.mean()
                    mlp.zero_grad()
                    test_loss.backward()
                torch.cuda.profiler.start()
                torch.cuda.synchronize()
                start_time = time()
                for _ in range(num_iters):
                    ref_out = ref_mlp_compiled(ref_input)
                    ref_loss = ref_out.mean()
                    ref_mlp_compiled.zero_grad()
                    ref_loss.backward()
                torch.cuda.synchronize()
                stop_time = time()
                print(F"\nPytorch MLP compiled time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
                torch.cuda.synchronize()
                start_time = time()
                for _ in range(num_iters):
                    ref_out = ref_mlp(ref_input)
                    ref_loss = ref_out.mean()
                    ref_mlp.zero_grad()
                    ref_loss.backward()
                torch.cuda.synchronize()
                stop_time = time()
                print(F"Pytorch MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
                # torch.cuda.synchronize()
                # start_time = time()
                # for _ in range(num_iters):
                #     mlp_out, _ = mlp(test_input)
                #     test_loss = mlp_out.mean()
                #     mlp.zero_grad()
                #     test_loss.backward()
                # torch.cuda.synchronize()
                # stop_time = time()
                # print(F"C++ MLP 3D time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
                # torch.cuda.profiler.stop()
                torch.cuda.synchronize()
                start_time = time()
                for _ in range(num_iters):
                    # 2D view of the same data for the fused C++ kernel.
                    mlp_out, _ = mlp(test_input.view(-1, test_input.size(-1)))
                    test_loss = mlp_out.mean()
                    mlp.zero_grad()
                    test_loss.backward()
                torch.cuda.synchronize()
                stop_time = time()
                print(F"C++ MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
                torch.cuda.profiler.stop()
                torch.cuda.synchronize()
                start_time = time()
                for _ in range(num_iters):
                    mlp_out, _ = mlp(test_input, fastest=True)
                    test_loss = mlp_out.mean()
                    mlp.zero_grad()
                    test_loss.backward()
                torch.cuda.synchronize()
                stop_time = time()
                print(F"BLASLT MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
                torch.cuda.profiler.stop()
                torch.cuda.synchronize()
                start_time = time()
                for _ in range(num_iters):
                    mlp_out, _ = mlp(test_input, fastest=True, recompute=True)
                    test_loss = mlp_out.mean()
                    mlp.zero_grad()
                    test_loss.backward()
                torch.cuda.synchronize()
                stop_time = time()
                print(F"BLASLT MLP recompute time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
                torch.cuda.profiler.stop()
# def test_performance_half_no_grad_weight(self):
# print("Testing performance without backward to weight ...")
# for dropout in [0.0, 0.5]:
# mlp = MLP(mlp_sizes, dropout=dropout).cuda().half()
#
# mlp_layers = []
# for i in range(mlp.num_layers):
# linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1])
# mlp.weights[i].data.copy_(linear.weight)
# mlp.biases[i].data.copy_(linear.bias)
# mlp_layers.append(linear)
# if i < mlp.num_layers - 1:
# # mlp_layers.append(nn.ReLU(inplace=True))
# mlp_layers.append(torch.nn.GELU())
# mlp_layers.append(nn.Dropout(dropout))
#
# ref_mlp = nn.Sequential(*mlp_layers).cuda().half()
#
# for p in mlp.parameters():
# p.requires_grad = False
#
# for p in ref_mlp.parameters():
# p.requires_grad = False
#
# test_input = torch.empty(
# batch_size, seq_len, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
# ref_input = torch.empty(
# batch_size, seq_len, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
#
# # Warm up GPU
# for _ in range(num_iters):
# ref_out = ref_mlp(ref_input)
# ref_loss = ref_out.mean()
# ref_mlp.zero_grad()
# ref_loss.backward()
# mlp_out, _ = mlp(test_input)
# test_loss = mlp_out.mean()
# mlp.zero_grad()
# test_loss.backward()
#
# torch.cuda.profiler.start()
# torch.cuda.synchronize()
# start_time = time()
# for _ in range(num_iters):
# ref_out = ref_mlp(ref_input)
# ref_loss = ref_out.mean()
# ref_mlp.zero_grad()
# ref_loss.backward()
# torch.cuda.synchronize()
# stop_time = time()
# print(F"\nPytorch MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
#
# torch.cuda.synchronize()
# start_time = time()
# for _ in range(num_iters):
# mlp_out, _ = mlp(test_input)
# test_loss = mlp_out.mean()
# mlp.zero_grad()
# test_loss.backward()
# torch.cuda.synchronize()
# stop_time = time()
# print(F"C++ MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
# torch.cuda.profiler.stop()
unittest.main()
# test = TestMLP()
# test.test_creation()
# test.test_performance_half()
# test.test_with_bias()
| 22,661 | 38.005164 | 122 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/mlp/mlp.py | from copy import copy
import math
import torch
from torch import nn
import unittest
from time import time
import numpy as np
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from ..optimized.compat import custom_fwd, custom_bwd
try:
import fused_mlp_relu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_relu = None
try:
import fused_mlp_silu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_silu = None
try:
import fused_mlp_gelu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_gelu = None
try:
import fused_mlp_agelu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_agelu = None
try:
import fused_mlp_gelu_dropout_add
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_gelu_dropout_add = None
try:
import mlp_gelu_blaslt
except (ModuleNotFoundError, ImportError) as e:
mlp_gelu_blaslt = None
def _cast_if_autocast_enabled(*args):
if not torch.is_autocast_enabled():
return args
else:
try:
return torch.cuda.amp.autocast_mode._cast(args, torch.get_autocast_gpu_dtype())
except AttributeError:
return torch.cuda.amp.autocast_mode._cast(args, torch.half)
class MlpReluFunction(torch.autograd.Function):
    """Autograd wrapper around the fused C++/CUDA ReLU-MLP kernels.

    Forward args: dropout probability ``p``, a ``recompute`` flag, then the
    input tensor followed by all layer weights and biases.
    """

    @staticmethod
    def forward(ctx, p, recompute, *args):
        # only need to store dropout mask if we need to recompute
        store_dropout_mask = recompute
        output = fused_mlp_relu.forward(p, store_dropout_mask, args)
        ctx.save_for_backward(*args)
        ctx.recompute = recompute
        if not recompute:
            # Standard backward: keep all intermediate activations.
            ctx.outputs = output
            ctx.dropout_mask = None
        else:
            # Memory-saving backward: keep only the dropout mask and
            # recompute activations inside backward_recompute.
            ctx.dropout_mask = output[-1]
            ctx.outputs = None
        ctx.p = p
        return output[0]

    @staticmethod
    def backward(ctx, *grad_o):
        p = ctx.p
        if not ctx.recompute:
            grads = fused_mlp_relu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
            del ctx.outputs
        else:
            grads = fused_mlp_relu.backward_recompute(p, grad_o[0], ctx.dropout_mask, ctx.saved_tensors)
            del ctx.dropout_mask
        # Two leading Nones match the non-tensor (p, recompute) forward args.
        return (None, None, *grads)
if fused_mlp_relu:
    def mlp_relu_function(*args):
        """Fused ReLU MLP entry: pre-cast for autocast, run autocast-disabled."""
        casted_args = _cast_if_autocast_enabled(*args)
        with torch.cuda.amp.autocast(enabled=False):
            return MlpReluFunction.apply(*casted_args)
else:
    # Extension not built; callers feature-test this symbol against None.
    mlp_relu_function = None
class MlpSiluFunction(torch.autograd.Function):
    """Autograd wrapper around the fused SiLU-MLP extension kernels."""

    @staticmethod
    @custom_fwd
    def forward(ctx, p, recompute, *args):
        output = fused_mlp_silu.forward(p, args)
        ctx.save_for_backward(*args)
        ctx.outputs = output
        dropout_mask = output[-1]  # NOTE(review): unused local in this scope
        ctx.p = p
        return output[0]

    @staticmethod
    @custom_bwd
    def backward(ctx, *grad_o):
        p = ctx.p
        grads = fused_mlp_silu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
        del ctx.outputs
        # Two leading Nones match the (p, recompute) non-tensor forward args.
        return (None, None, *grads)
if fused_mlp_silu:
    def mlp_silu_function(*args):
        """Fused SiLU MLP entry: pre-cast for autocast, run autocast-disabled."""
        casted_args = _cast_if_autocast_enabled(*args)
        with torch.cuda.amp.autocast(enabled=False):
            return MlpSiluFunction.apply(*casted_args)
else:
    # Extension not built; callers feature-test this symbol against None.
    mlp_silu_function = None
class MlpGELUFunction(torch.autograd.Function):
    """Autograd wrapper for the fused GELU MLP; prefers the cuBLASLt path."""

    @staticmethod
    @custom_fwd
    def forward(ctx, p, recompute, *args):
        if mlp_gelu_blaslt is not None:
            output = mlp_gelu_blaslt.forward(p, args)
            if recompute:
                # Keep only the final output and dropout mask for backward.
                ctx.outputs = (output[0], output[-1])
                # NOTE(review): assuming ``output`` is a Python list, the first
                # del re-indexes it, so these two statements remove the items
                # at ORIGINAL positions 1 and 3 — confirm that is intended.
                del output[1]
                del output[2]
            else:
                ctx.outputs = output
        else:
            output = fused_mlp_gelu.forward(p, args)
            ctx.outputs = output
        ctx.save_for_backward(*args)
        # dropout_mask = output[-1]
        ctx.p = p
        ctx.recompute = recompute
        # args[1] is the first weight (args = input, *weights, *biases);
        # used to detect frozen-parameter (input-grad-only) mode.
        ctx.requires_grad_weight = args[1].requires_grad
        return output[0]

    @staticmethod
    @custom_bwd
    def backward(ctx, *grad_o):
        p = ctx.p
        recompute = ctx.recompute
        if ctx.requires_grad_weight:
            if mlp_gelu_blaslt is not None:
                grads = mlp_gelu_blaslt.backward(p, recompute, grad_o[0], ctx.outputs, ctx.saved_tensors)
            else:
                grads = fused_mlp_gelu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
        else:
            # Input-only backward: pad with Nones for the weight/bias slots.
            if mlp_gelu_blaslt is not None:
                grads = mlp_gelu_blaslt.backward_input_only(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
            else:
                grads = fused_mlp_gelu.backward_input_only(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
            for _ in range(len(ctx.saved_tensors) - 1):
                grads.append(None)
        del ctx.requires_grad_weight
        del ctx.outputs
        del ctx.p
        del ctx.recompute
        # Two leading Nones match the (p, recompute) non-tensor forward args.
        return (None, None, *grads)
if fused_mlp_gelu or mlp_gelu_blaslt:
    def mlp_gelu_function(*args):
        """Fused GELU MLP entry: pre-cast for autocast, run autocast-disabled."""
        casted_args = _cast_if_autocast_enabled(*args)
        with torch.cuda.amp.autocast(enabled=False):
            return MlpGELUFunction.apply(*casted_args)
else:
    # Neither extension built; callers feature-test this symbol against None.
    mlp_gelu_function = None
class MlpAGELUFunction(torch.autograd.Function):
    """Autograd wrapper around the fused "AGELU" MLP extension kernels."""

    @staticmethod
    @custom_fwd
    def forward(ctx, p, recompute, *args):
        output = fused_mlp_agelu.forward(p, args)
        ctx.save_for_backward(*args)
        ctx.outputs = output
        dropout_mask = output[-1]  # NOTE(review): unused local in this scope
        ctx.p = p
        return output[0]

    @staticmethod
    @custom_bwd
    def backward(ctx, *grad_o):
        p = ctx.p
        grads = fused_mlp_agelu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
        del ctx.outputs
        # Two leading Nones match the (p, recompute) non-tensor forward args.
        return (None, None, *grads)
if fused_mlp_agelu:
    def mlp_agelu_function(*args):
        """Fused AGELU MLP entry: pre-cast for autocast, run autocast-disabled."""
        casted_args = _cast_if_autocast_enabled(*args)
        with torch.cuda.amp.autocast(enabled=False):
            return MlpAGELUFunction.apply(*casted_args)
else:
    # Extension not built; callers feature-test this symbol against None.
    mlp_agelu_function = None
if __name__ == '__main__':
from copy import deepcopy
import torch.nn.functional as F
import random
    class MLP(torch.nn.Module):
        """Launch MLP in C++

        Args:
            mlp_sizes (list of int): MLP sizes. Example: [1024,1024,1024] will create 2 MLP layers with shape 1024x1024
            bias (bool): Default True:
            relu (bool): Default True
        """

        # NOTE(review): defined without @staticmethod, so as a bound method its
        # first argument would be ``self``; it is not called anywhere in this
        # class — looks like leftover code. Verify.
        def fast_gelu_1(x):
            # sqrt(2/pi) = 0.7978845608028654
            return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * (x + 0.044715 * torch.pow(x, 3.0))))

        def __init__(self, mlp_sizes, activation='gelu', dropout=0.25):
            super(MLP, self).__init__()
            self.num_layers = len(mlp_sizes) - 1
            self.mlp_sizes = copy(mlp_sizes)
            self.dropout = dropout
            # Activation stored as an integer code understood by the kernels.
            if activation == 'relu':
                self.activation = 1
            elif activation == 'sigmoid':
                self.activation = 2
            elif activation == 'gelu':
                self.activation = 3
            else:
                raise TypeError("activation must be relu or none.")
            self.weights = []
            self.biases = []
            for i in range(self.num_layers):
                w = torch.nn.Parameter(torch.empty(mlp_sizes[i + 1], mlp_sizes[i]))
                self.weights.append(w)
                name = 'weight_{}'.format(i)
                setattr(self, name, w)
                b = torch.nn.Parameter(torch.empty(mlp_sizes[i + 1]))
                self.biases.append(b)
                name = 'bias_{}'.format(i)
                setattr(self, name, b)
            self.reset_parameters()

        def reset_parameters(self):
            # Glorot-style normal initialization for weights and biases.
            for weight in self.weights:
                dimsum = weight.size(0) + weight.size(1)
                std = math.sqrt(2. / float(dimsum))
                nn.init.normal_(weight, 0., std)
            for bias in self.biases:
                std = math.sqrt(1. / float(bias.size(0)))
                nn.init.normal_(bias, 0., std)

        def forward(self, input, mask=None, ref=False, blaslt=False):
            """Run the fused MLP; ``ref=True`` uses the pure-PyTorch reference."""
            if ref:
                return self.forward_ref(input, mask=mask)
            if not blaslt:
                return mlp_gelu_function(self.dropout, False, input, *self.weights, *self.biases)
            # print(input.type(), self.weights[0].type())
            # NOTE(review): ``mlp_gelu_blaslt_function`` is not defined in this
            # module — this branch raises NameError when blaslt=True. Verify.
            return mlp_gelu_blaslt_function(input, self.weights[0], self.biases[0], self.weights[1], self.biases[1])

        def forward_ref(self, input, mask=None):
            """Reference forward: F.linear + GELU + mask-driven dropout."""
            i = 0
            output = input
            for l in range(self.num_layers):
                output = F.linear(output, self.weights[l], self.biases[l])
                # Slice this layer's portion out of the flat dropout mask.
                dropout_mask = mask[i:i + output.numel()]
                pinv = 1 / (1 - self.dropout)  # inverse keep-probability
                if l < self.num_layers - 1:
                    # print(mask.size())
                    # output = fast_silu(output) * dropout_mask.view(output.size(0), -1) * pinv
                    # output = GELUFunction.apply(output) * dropout_mask.view(output.size(0), -1) * pinv
                    output = F.gelu(output) * dropout_mask.view(output.size(0), -1) * pinv
                i += output.numel()
            return output

        def extra_repr(self):
            # TODO add dropout probability
            s = F"MLP sizes: {self.mlp_sizes}, activation={self.activation}"
            return s
batch_size = 24568
mlp_sizes = [1024, 4096, 1024]
# mlp_sizes = [4, 7, 4]
num_iters = 10
    class TestMLP(unittest.TestCase):
        """Correctness and timing tests for the fused GELU MLP (CUDA only)."""

        def test_creation(self):
            """Module construction alone must not raise."""
            MLP(mlp_sizes)

        def test_numeric(self):
            """Fused output and gradients must match the PyTorch reference."""
            mlp = MLP(mlp_sizes, activation='gelu').cuda()
            print(mlp)
            ref_mlp = deepcopy(mlp)
            for _ in range(1):
                # Random batch size, kept a multiple of 8 for tensor cores.
                bsz = random.randint(2850, batch_size // 8) * 8
                test_input = torch.empty(bsz, mlp_sizes[0], device="cuda").uniform_(-1., 1.).requires_grad_()
                ref_input = test_input.clone().detach().requires_grad_()
                mlp_out = mlp(test_input)
                ref_out = ref_mlp.forward(ref_input, ref=True)
                # print(dropout_mask.sum() / dropout_mask.numel())
                np.testing.assert_allclose(
                    mlp_out.detach().cpu().numpy(),
                    ref_out.detach().cpu().numpy(),
                    atol=1e-5, rtol=1e-4)
                # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
                mlp_out.mean().mul(10.).backward()
                ref_out.mean().mul(10.).backward()
                np.testing.assert_allclose(
                    test_input.grad.detach().cpu().numpy(),
                    ref_input.grad.detach().cpu().numpy(),
                    atol=1e-7, rtol=1e-5)
                np.testing.assert_allclose(
                    mlp.biases[0].grad.detach().cpu().numpy(),
                    ref_mlp.biases[0].grad.detach().cpu().numpy(),
                    atol=1e-7, rtol=1e-5)

        def test_with_bias(self):
            """Gradients of every weight and bias must match the reference."""
            for use_activation in ['relu']:
                mlp = MLP(mlp_sizes, activation=use_activation).cuda()
                ref_mlp = deepcopy(mlp)
                test_input = torch.empty(batch_size, mlp_sizes[0], device="cuda").uniform_(-1., 1.).requires_grad_()
                ref_input = test_input.clone().detach().requires_grad_()
                mlp_out = mlp(test_input)
                ref_out = ref_mlp(ref_input, ref=True)
                np.testing.assert_allclose(
                    mlp_out.detach().cpu().numpy(),
                    ref_out.detach().cpu().numpy(),
                    atol=1e-7, rtol=1e-5)
                # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
                mlp_out.mean().mul(10.).backward()
                ref_out.mean().mul(10.).backward()
                np.testing.assert_allclose(
                    test_input.grad.detach().cpu().numpy(),
                    ref_input.grad.detach().cpu().numpy(),
                    atol=1e-5, rtol=1e-4)
                for l in range(mlp.num_layers):
                    np.testing.assert_allclose(
                        mlp.weights[l].grad.detach().cpu().numpy(),
                        ref_mlp.weights[l].grad.detach().cpu().numpy(),
                        atol=1e-7, rtol=1e-5)
                    np.testing.assert_allclose(
                        mlp.biases[l].grad.detach().cpu().numpy(),
                        ref_mlp.biases[l].grad.detach().cpu().numpy(),
                        atol=1e-7, rtol=1e-5)

        def test_no_grad(self):
            """Forward on no-grad inputs must still match the reference."""
            mlp = MLP(mlp_sizes).cuda()
            ref_mlp = deepcopy(mlp)
            test_input = torch.empty(batch_size, mlp_sizes[0], device="cuda").uniform_(-1., 1.)
            ref_input = test_input.clone().detach()
            mlp_out = mlp(test_input)
            ref_out = ref_mlp(ref_input, ref=True)
            np.testing.assert_allclose(
                mlp_out.detach().cpu().numpy(),
                ref_out.detach().cpu().numpy(),
                atol=1e-7, rtol=1e-5)

        def test_performance_half(self):
            """fp16 wall-clock comparison: eager PyTorch vs the fused path."""
            mlp = MLP(mlp_sizes).cuda().half()
            mlp_layers = []
            for i in range(mlp.num_layers):
                linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1])
                # Share parameters so both models compute the same function.
                mlp.weights[i].data.copy_(linear.weight)
                mlp.biases[i].data.copy_(linear.bias)
                mlp_layers.append(linear)
                if i < mlp.num_layers - 1:
                    # mlp_layers.append(nn.ReLU(inplace=True))
                    mlp_layers.append(torch.nn.GELU())
                    mlp_layers.append(nn.Dropout(0.25))
            ref_mlp = nn.Sequential(*mlp_layers).cuda().half()
            test_input = torch.empty(
                batch_size, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
            ref_input = torch.empty(
                batch_size, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
            # Warm up GPU
            for _ in range(100):
                ref_out = ref_mlp(ref_input)
                ref_loss = ref_out.mean()
                ref_mlp.zero_grad()
                ref_loss.backward()
                mlp_out = mlp(test_input)
                test_loss = mlp_out.mean()
                mlp.zero_grad()
                test_loss.backward()
            torch.cuda.profiler.start()
            torch.cuda.synchronize()
            start_time = time()
            for _ in range(num_iters):
                ref_out = ref_mlp(ref_input)
                ref_loss = ref_out.mean()
                ref_mlp.zero_grad()
                ref_loss.backward()
            torch.cuda.synchronize()
            stop_time = time()
            print(F"\nPytorch MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
            torch.cuda.synchronize()
            start_time = time()
            for _ in range(num_iters):
                mlp_out = mlp(test_input)
                test_loss = mlp_out.mean()
                mlp.zero_grad()
                test_loss.backward()
            torch.cuda.synchronize()
            stop_time = time()
            print(F"C++ MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
            torch.cuda.profiler.stop()
            # torch.cuda.synchronize()
            # start_time = time()
            # for _ in range(num_iters):
            #     mlp_out = mlp(test_input, blaslt=True)
            #     test_loss = mlp_out.mean()
            #     mlp.zero_grad()
            #     test_loss.backward()
            # torch.cuda.synchronize()
            # stop_time = time()
            # print(F"BLASLT MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
            # torch.cuda.profiler.stop()
unittest.main()
| 15,995 | 33.252677 | 119 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/mlp/test_mlp_relu.py | from copy import copy, deepcopy
import math
import torch
from torch import nn
import torch.nn.functional as F
import unittest
from time import time
import numpy as np
import random
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from ..optimized.compat import custom_fwd, custom_bwd
try:
import fused_mlp_relu
except (ModuleNotFoundError, ImportError) as e:
fused_mlp_relu = None
torch.set_float32_matmul_precision('high')
class MlpReluFunction(torch.autograd.Function):
    """Fused ReLU-MLP autograd op; returns (output, dropout_mask)."""

    @staticmethod
    # @custom_fwd(cast_inputs=torch.float16)
    @custom_fwd
    def forward(ctx, p, *args):
        # Mask is always stored so the reference path can reuse it.
        store_dropout_mask = True
        output = fused_mlp_relu.forward(p, store_dropout_mask, args)
        ctx.save_for_backward(*args)
        ctx.outputs = output
        dropout_mask = output[-1]
        ctx.p = p
        return output[0], dropout_mask

    @staticmethod
    @custom_bwd
    def backward(ctx, *grad_o):
        p = ctx.p
        grads = fused_mlp_relu.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
        del ctx.outputs
        # Leading None matches the non-tensor ``p`` forward argument.
        return (None, *grads)
# Public alias: the fused autograd entry point, or None when the extension
# is unavailable.
mlp_relu_function = MlpReluFunction.apply if fused_mlp_relu else None
class MlpReluRecomputeFunction(torch.autograd.Function):
    """Fused ReLU MLP that stores only the dropout mask and recomputes
    activations during backward (trades compute for memory)."""

    @staticmethod
    @custom_fwd
    def forward(ctx, p, *args):
        store_dropout_mask = True
        output = fused_mlp_relu.forward(p, store_dropout_mask, args)
        ctx.save_for_backward(*args)
        dropout_mask = output[-1]
        ctx.dropout_mask = dropout_mask
        ctx.p = p
        return output[0], dropout_mask

    @staticmethod
    @custom_bwd
    def backward(ctx, *grad_o):
        p = ctx.p
        dropout_mask = ctx.dropout_mask
        grads = fused_mlp_relu.backward_recompute(p, grad_o[0], dropout_mask, ctx.saved_tensors)
        del ctx.dropout_mask
        # Leading None matches the non-tensor ``p`` forward argument.
        return (None, *grads)
# Public alias: the recompute-in-backward entry point, or None when the
# extension is unavailable.
mlp_relu_recompute_function = MlpReluRecomputeFunction.apply if fused_mlp_relu else None
def foo(x, y):
    """Toy function used by the torch.compile smoke test below.

    NOTE(review): ``y`` is unused — both terms derive from ``x``. The
    torch.compile tutorial version uses ``torch.cos(y)``; confirm intent.
    """
    return torch.sin(x) + torch.cos(x)
# torch.compile wraps ``foo`` at module import time; the print marks that the
# wrapping (not necessarily the compilation itself) finished.
opt_foo1 = torch.compile(foo)
print("COMPILED")
if __name__ == '__main__':
    class MLP(torch.nn.Module):
        """Launch MLP in C++

        Args:
            mlp_sizes (list of int): MLP sizes. Example: [1024,1024,1024] will create 2 MLP layers with shape 1024x1024
            bias (bool): Default True:
            relu (bool): Default True
        """

        def __init__(self, mlp_sizes, activation='relu', dropout=0.25, recompute=False):
            super(MLP, self).__init__()
            self.num_layers = len(mlp_sizes) - 1
            self.mlp_sizes = copy(mlp_sizes)
            self.dropout = dropout
            # When True, backward recomputes activations instead of storing them.
            self.recompute = recompute
            # Activation stored as an integer code understood by the kernels.
            if activation == 'relu':
                self.activation = 1
            elif activation == 'sigmoid':
                self.activation = 2
            elif activation == 'gelu':
                self.activation = 3
            else:
                raise TypeError("activation must be relu or none.")
            self.weights = []
            self.biases = []
            for i in range(self.num_layers):
                w = torch.nn.Parameter(torch.empty(mlp_sizes[i + 1], mlp_sizes[i]))
                self.weights.append(w)
                name = 'weight_{}'.format(i)
                setattr(self, name, w)
                b = torch.nn.Parameter(torch.empty(mlp_sizes[i + 1]))
                self.biases.append(b)
                name = 'bias_{}'.format(i)
                setattr(self, name, b)
            self.reset_parameters()

        def reset_parameters(self):
            # Glorot-style normal initialization for weights and biases.
            for weight in self.weights:
                dimsum = weight.size(0) + weight.size(1)
                std = math.sqrt(2. / float(dimsum))
                nn.init.normal_(weight, 0., std)
            for bias in self.biases:
                std = math.sqrt(1. / float(bias.size(0)))
                nn.init.normal_(bias, 0., std)

        def forward(self, input, mask=None, ref=False):
            """Run the fused ReLU MLP; ``ref=True`` uses the PyTorch reference."""
            if ref:
                return self.forward_ref(input, mask)
            # return mlp_relu_function(self.dropout, input, *self.weights, *self.biases)
            # return mlp_agelu_function(self.dropout, input, *self.weights, *self.biases)
            # return mlp_relu_function(self.dropout, input, *self.weights, *self.biases)
            if self.recompute:
                return mlp_relu_recompute_function(self.dropout, input, *self.weights, *self.biases)
            else:
                return mlp_relu_function(self.dropout, input, *self.weights, *self.biases)

        def forward_ref(self, input, mask):
            """Reference forward: F.linear + ReLU + mask-driven dropout."""
            i = 0
            output = input
            for l in range(self.num_layers):
                output = F.linear(output, self.weights[l], self.biases[l])
                # Slice this layer's portion out of the flat dropout mask.
                dropout_mask = mask[i:i + output.numel()]
                pinv = 1 / (1 - self.dropout)  # inverse keep-probability
                if l < self.num_layers - 1:
                    # print(mask.size())
                    # output = fast_silu(output) * dropout_mask.view(output.size(0), -1) * pinv
                    # output = GELUFunction.apply(output) * dropout_mask.view(output.size(0), -1) * pinv
                    if self.dropout > 0:
                        output = F.relu(output) * dropout_mask.view_as(output) * pinv
                    else:
                        output = F.relu(output)
                i += output.numel()
            return output

        def extra_repr(self):
            # TODO add dropout probability
            s = F"MLP sizes: {self.mlp_sizes}, activation={self.activation}, dropout={self.dropout}"
            return s
batch_size = 1024
seq_len = 64
mlp_sizes = [512, 4096, 512]
# mlp_sizes = [4, 7, 4]
num_iters = 64
    class TestMLP(unittest.TestCase):
        """Correctness and timing tests for the fused ReLU MLP (CUDA only)."""

        def test_creation(self):
            """Module construction alone must not raise."""
            MLP(mlp_sizes)

        def test_numeric(self):
            """Fused forward/backward must match the PyTorch reference, which
            reuses the dropout mask produced by the fused path."""
            print("Test numeric 3D ....")
            for dropout in [0.0, 0.2, 0.5, 0.7]:
                mlp = MLP(mlp_sizes, activation='relu', dropout=dropout).cuda()
                print(mlp)
                ref_mlp = deepcopy(mlp)
                for _ in range(1):
                    # Random batch size, kept a multiple of 8 for tensor cores.
                    bsz = random.randint(64, batch_size // 8) * 8
                    test_input = torch.empty(seq_len, bsz, mlp_sizes[0], device="cuda").uniform_(-1., 1.).requires_grad_()
                    ref_input = test_input.clone().detach().requires_grad_()
                    mlp_out, dropout_mask = mlp(test_input)
                    ref_out = ref_mlp.forward(ref_input, dropout_mask, ref=True)
                    print(dropout_mask.sum() / dropout_mask.numel())
                    np.testing.assert_allclose(
                        mlp_out.detach().cpu().numpy(),
                        ref_out.detach().cpu().numpy(),
                        atol=1e-5, rtol=1e-4)
                    # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
                    mlp_out.mean().mul(10.).backward()
                    ref_out.mean().mul(10.).backward()
                    np.testing.assert_allclose(
                        test_input.grad.detach().cpu().numpy(),
                        ref_input.grad.detach().cpu().numpy(),
                        atol=1e-7, rtol=1e-5)
                    np.testing.assert_allclose(
                        mlp.biases[0].grad.detach().cpu().numpy(),
                        ref_mlp.biases[0].grad.detach().cpu().numpy(),
                        atol=1e-7, rtol=1e-5)

        def test_with_bias(self):
            """Gradients of every weight and bias must match the reference."""
            for use_activation in ['relu']:
                mlp = MLP(mlp_sizes, activation=use_activation).cuda()
                ref_mlp = deepcopy(mlp)
                test_input = torch.empty(batch_size, mlp_sizes[0], device="cuda").uniform_(-1., 1.).requires_grad_()
                ref_input = test_input.clone().detach().requires_grad_()
                mlp_out, dropout_mask = mlp(test_input)
                ref_out = ref_mlp(ref_input, dropout_mask, ref=True)
                np.testing.assert_allclose(
                    mlp_out.detach().cpu().numpy(),
                    ref_out.detach().cpu().numpy(),
                    atol=1e-7, rtol=1e-5)
                # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
                mlp_out.mean().mul(10.).backward()
                ref_out.mean().mul(10.).backward()
                np.testing.assert_allclose(
                    test_input.grad.detach().cpu().numpy(),
                    ref_input.grad.detach().cpu().numpy(),
                    atol=1e-5, rtol=1e-4)
                for l in range(mlp.num_layers):
                    np.testing.assert_allclose(
                        mlp.weights[l].grad.detach().cpu().numpy(),
                        ref_mlp.weights[l].grad.detach().cpu().numpy(),
                        atol=1e-7, rtol=1e-5)
                    np.testing.assert_allclose(
                        mlp.biases[l].grad.detach().cpu().numpy(),
                        ref_mlp.biases[l].grad.detach().cpu().numpy(),
                        atol=1e-7, rtol=1e-5)

        def test_no_grad(self):
            """Forward on no-grad inputs must still match the reference."""
            mlp = MLP(mlp_sizes).cuda()
            ref_mlp = deepcopy(mlp)
            test_input = torch.empty(batch_size, mlp_sizes[0], device="cuda").uniform_(-1., 1.)
            ref_input = test_input.clone().detach()
            mlp_out, dropout_mask = mlp(test_input)
            ref_out = ref_mlp(ref_input, dropout_mask, ref=True)
            np.testing.assert_allclose(
                mlp_out.detach().cpu().numpy(),
                ref_out.detach().cpu().numpy(),
                atol=1e-7, rtol=1e-5)

        def test_performance_half(self):
            """fp16 timing: compiled PyTorch vs fused C++ paths.

            NOTE(review): ``ref_input`` uses batch_size // 2 while the fused
            path uses the full batch, and the last loop zero-grads ``ref_mlp``
            while stepping ``ref_mlp_fast`` — the timings/labels look
            inconsistent; verify before comparing numbers.
            """
            for dropout in [0.0, 0.5]:
                mlp = MLP(mlp_sizes).cuda().half()
                ref_mlp_fast = MLP(mlp_sizes, recompute=False, dropout=dropout).cuda().half()
                mlp_layers = []
                for i in range(mlp.num_layers):
                    linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1])
                    # Share parameters so both models compute the same function.
                    mlp.weights[i].data.copy_(linear.weight)
                    mlp.biases[i].data.copy_(linear.bias)
                    mlp_layers.append(linear)
                    if i < mlp.num_layers - 1:
                        # mlp_layers.append(nn.ReLU(inplace=True))
                        mlp_layers.append(torch.nn.ReLU())
                        mlp_layers.append(nn.Dropout(dropout))
                ref_mlp = nn.Sequential(*mlp_layers).cuda()
                print("Compiling ref mlp ...")
                ref_mlp = torch.compile(ref_mlp)
                ref_mlp = ref_mlp.half()
                test_input = torch.empty(
                    seq_len, batch_size, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
                ref_input = torch.empty(
                    seq_len, batch_size // 2, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
                # Warm up GPU
                for _ in range(100):
                    ref_out = ref_mlp(ref_input)
                    ref_loss = ref_out.mean()
                    ref_mlp.zero_grad()
                    ref_loss.backward()
                    mlp_out, _ = mlp(test_input)
                    test_loss = mlp_out.mean()
                    mlp.zero_grad()
                    test_loss.backward()
                torch.cuda.profiler.start()
                torch.cuda.synchronize()
                start_time = time()
                for _ in range(num_iters):
                    ref_out = ref_mlp(ref_input)
                    ref_loss = ref_out.mean()
                    ref_mlp.zero_grad()
                    ref_loss.backward()
                torch.cuda.synchronize()
                stop_time = time()
                print(F"\nPytorch MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
                torch.cuda.synchronize()
                start_time = time()
                for _ in range(num_iters):
                    mlp_out, _ = mlp(test_input)
                    test_loss = mlp_out.mean()
                    mlp.zero_grad()
                    test_loss.backward()
                torch.cuda.synchronize()
                stop_time = time()
                print(F"C++ MLP recompute time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
                torch.cuda.profiler.stop()
                torch.cuda.synchronize()
                start_time = time()
                for _ in range(num_iters):
                    mlp_out, _ = ref_mlp_fast(ref_input)
                    test_loss = mlp_out.mean()
                    ref_mlp.zero_grad()
                    test_loss.backward()
                torch.cuda.synchronize()
                stop_time = time()
                print(F"C++ MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
                torch.cuda.profiler.stop()
unittest.main()
# test = TestMLP()
# test.test_creation()
# test.test_performance_half()
# test.test_with_bias()
| 13,167 | 36.409091 | 122 | py |
NMTGMinor | NMTGMinor-master/onmt/models/transformer_xl.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.modules.relative_attention import RelPartialLearnableMultiHeadAttn
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, TransformerDecodingState
import onmt
from onmt.modules.bottle import Bottle
from onmt.modules.dropout import embedded_dropout
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.relative_transformer_layers import RelativeTransformerEncoderLayer, RelativeTransformerDecoderLayer
from onmt.legacy.old_models.unified_transformer import UnifiedTransformer
from onmt.models.relative_transformer import SinusoidalPositionalEmbedding, StreamState, \
StreamDecodingState, RelativeTransformerDecoder
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
class TransformerXLDecoderLayer(nn.Module):
    """One Transformer-XL decoder block.

    Relative-position self-attention (with optional recurrence memory `mems`)
    followed by a position-wise feed-forward network; each sub-layer is wrapped
    in PrePostProcessing ('n' = layernorm before, 'da' = dropout + residual
    after).  Supports stochastic layer dropout via `death_rate`: during
    training the whole block is skipped with that probability and its output
    rescaled by 1/(1 - death_rate) otherwise.
    """

    def __init__(self, h, d_model, p, d_ff, attn_p=0.1, version=1.0, ignore_source=False,
                 variational=False, death_rate=0.0):
        # h: number of attention heads
        # d_model: model (hidden) dimension
        # p: dropout rate for residual / feed-forward paths
        # d_ff: feed-forward inner dimension
        # attn_p: attention-weight dropout rate
        # ignore_source: kept for API symmetry; this layer has no cross-attention
        # variational: use variational (shared-mask) dropout in post-processing
        # death_rate: probability of skipping this layer during training
        super(TransformerXLDecoderLayer, self).__init__()
        self.version = version
        self.ignore_source = ignore_source
        self.variational = variational
        self.death_rate = death_rate

        # 'n' = layernorm only; 'da' = dropout then add residual
        self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
        self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', variational=self.variational)
        self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
        self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', variational=self.variational)

        d_head = d_model // h
        self.multihead_tgt = RelPartialLearnableMultiHeadAttn(h, d_model, d_head, dropatt=attn_p)

        if onmt.constants.activation_layer == 'linear_relu_linear':
            ff_p = p
            feedforward = FeedForward(d_model, d_ff, ff_p, variational=self.variational)
        elif onmt.constants.activation_layer == 'maxout':
            # NOTE(review): MaxOut is not imported in this module — selecting
            # this activation would raise NameError; confirm before use.
            k = int(math.ceil(d_ff / d_model))
            feedforward = MaxOut(d_model, d_model, k)
        elif onmt.constants.activation_layer == 'linear_swish_linear':
            # NOTE(review): FeedForwardSwish is likewise not imported here.
            ff_p = p
            feedforward = FeedForwardSwish(d_model, d_ff, ff_p)
        else:
            raise NotImplementedError
        self.feedforward = Bottle(feedforward)

    def forward(self, input_, context, pos_emb, mask_tgt, mask_src, mems=None,
                incremental=False, incremental_cache=None):
        # incremental=False, incremental_cache=None, reuse_source=True):
        """ Self attention layer with memory
            layernorm > attn > dropout > residual

        input_: decoder hidden states (presumably time-first — TODO confirm)
        context: must be None (this layer has no encoder cross-attention)
        pos_emb: relative positional embeddings fed to the attention module
        mask_tgt / mask_src: attention masks; mask_src is unused here
        mems: optional recurrence memory prepended to keys/values
        incremental: decode one step at a time, threading incremental_cache
        Returns (output, coverage[, incremental_cache if incremental]).
        """

        assert context is None, "This model does not have an context encoder"

        # Stochastic depth: flip a coin each training step to skip the block.
        coin = True
        if self.training and self.death_rate > 0:
            coin = (torch.rand(1)[0].item() >= self.death_rate)

        if coin:
            # input and context should be time first ?
            query = self.preprocess_attn(input_)

            # Normalize the memory with the same layernorm before attending.
            if mems is not None and mems.size(0) > 0:
                mems = self.preprocess_attn(mems)
            else:
                mems = None

            # out, _ = self.multihead_tgt(query, pos_emb, r_w_bias, r_r_bias, attn_mask=mask_tgt)
            out, _, incremental_cache = self.multihead_tgt(query, pos_emb, attn_mask=mask_tgt, mems=mems,
                                                           incremental=incremental, incremental_cache=incremental_cache)

            # rescaling before residual
            if self.training and self.death_rate > 0:
                out = out / (1 - self.death_rate)

            input_ = self.postprocess_attn(out, input_)

            """ Context Attention layer
                layernorm > attn > dropout > residual
            """
            coverage = None

            """ Feed forward layer
                layernorm > ffn > dropout > residual
            """
            out = self.feedforward(self.preprocess_ffn(input_))

            # rescaling before residual
            if self.training and self.death_rate > 0:
                out = out / (1 - self.death_rate)

            input_ = self.postprocess_ffn(out, input_)
        else:
            # Layer skipped: pass the input through unchanged.
            coverage = None

        if incremental:
            return input_, coverage, incremental_cache

        return input_, coverage
class TransformerXL(RelativeTransformerDecoder):
    """
    This class combines the encoder and the decoder into one single sequence
    Joined attention between encoder and decoder parts

    Decoder-only Transformer-XL language model: no encoder context is used
    (ignore_source=True) and self-attention may attend over a recurrence
    memory (`mems`) when streaming.
    """

    def __init__(self, opt, tgt_embedding, generator,
                 language_embeddings=None, **kwargs):
        """
        :param opt: option namespace (model_size, n_heads, dropout, ...)
        :param tgt_embedding: target-side nn.Embedding (also tied to generator)
        :param generator: output projection / softmax module list
        :param language_embeddings: optional per-language embedding table
        """
        # self.tgt_embedding = tgt_embedding
        self.model_size = opt.model_size
        # build_modules will be called from the inherited constructor
        super().__init__(opt, tgt_embedding,
                         None,
                         language_embeddings=language_embeddings,
                         ignore_source=True)
        self.tgt_embedding = tgt_embedding
        self.generator = generator
        self.ignore_source = True
        self.same_length = False
        self.clamp_len = 0
        self.d_head = self.model_size // self.n_heads

    def build_modules(self):
        """Instantiate the stack of TransformerXLDecoderLayer blocks with a
        linearly increasing stochastic-depth death rate."""
        e_length = expected_length(self.layers, self.death_rate)
        print("* Transformer LM Decoder with Relative Attention with %.2f expected layers" % e_length)

        self.layer_modules = nn.ModuleList()

        for l in range(self.layers):
            # linearly decay the death rate
            death_r = (l + 1.0) / self.layers * self.death_rate

            block = TransformerXLDecoderLayer(self.n_heads, self.model_size,
                                              self.dropout, self.inner_size, self.attn_dropout,
                                              ignore_source=True,
                                              variational=self.variational_dropout, death_rate=death_r)
            self.layer_modules.append(block)

    def reset_states(self):
        # No persistent state outside of the streaming memory.
        return

    def tie_weights(self):
        """Tie the output projection weight to the target embedding weight."""
        self.generator[0].linear.weight = self.tgt_embedding.weight

    def forward(self, batch, target_mask=None, streaming=False, **kwargs):
        """Run the language model over a batch.

        :param batch: dataset batch object providing 'target_input' (T x B)
            and 'target_lang'
        :param target_mask: passed through into the output dict
        :param streaming: if True, read/update the recurrence memory held in
            kwargs['streaming_state']
        :return: defaultdict with 'hidden', 'logprobs', 'coverage', etc.
        """
        tgt = batch.get('target_input')
        tgt_lang = batch.get('target_lang')

        if streaming:
            streaming_state = kwargs.get('streaming_state', None)
            mems = streaming_state.tgt_mems
        else:
            mems = None

        qlen = tgt.size(0)

        word_emb = embedded_dropout(self.tgt_embedding, tgt, dropout=self.word_dropout if self.training else 0)
        word_emb.mul_(self.model_size ** 0.5)

        if self.use_language_embedding:
            lang_emb = self.language_embeddings(tgt_lang)  # B x H
            if self.language_embedding_type in ['sum', 'all_sum']:
                word_emb = word_emb + lang_emb
            else:
                raise NotImplementedError

        mlen = mems[0].size(0) if mems is not None else 0

        # total length: memory + current input
        klen = mlen + qlen

        # all units having the same attention range
        if self.same_length:
            # NOTE(review): relies on self.mem_len being set (presumably by the
            # parent class / opt) — confirm before enabling same_length.
            all_ones = word_emb.new_ones(qlen, klen)
            mask_len = klen - self.mem_len
            if mask_len > 0:
                mask_shift_len = qlen - mask_len
            else:
                mask_shift_len = qlen
            dec_attn_mask = (torch.triu(all_ones, 1 + mlen)
                             + torch.tril(all_ones, -mask_shift_len)).byte()[:, :, None]  # -1
        else:
            # Causal mask over [memory | current]: positions > own index masked.
            dec_attn_mask = torch.triu(
                word_emb.new_ones(qlen, klen), diagonal=1 + mlen).byte()[:, :, None]

        dec_attn_mask = dec_attn_mask.bool()

        # Relative positions run from klen-1 down to 0 (most distant first).
        pos = torch.arange(klen - 1, -1, -1.0, device=word_emb.device,
                           dtype=word_emb.dtype)

        if self.clamp_len > 0:
            # BUG FIX: was `pos_seq.clamp_(...)` — `pos_seq` is undefined in
            # this scope and raised NameError whenever clamp_len > 0.
            pos.clamp_(max=self.clamp_len)

        pos_emb = self.positional_encoder(pos)

        # Applying dropout
        output = self.preprocess_layer(word_emb)

        if streaming:
            hids = [output]

        pos_emb = self.preprocess_layer(pos_emb)

        # FORWARD PASS
        coverage = None
        for i, layer in enumerate(self.layer_modules):
            mems_i = None if mems is None else mems[i]
            output, coverage = layer(output, None, pos_emb, dec_attn_mask, None,
                                     mems=mems_i)  # context and context_mask are None
            if streaming:
                hids.append(output)

        # Final normalization
        output = self.postprocess_layer(output)

        output_dict = {'hidden': output, 'coverage': coverage, 'context': None, 'src': None,
                       'target_mask': target_mask}
        output_dict = defaultdict(lambda: None, output_dict)

        # final layer: computing log probabilities
        logprobs = self.generator[0](output_dict)
        output_dict['logprobs'] = logprobs

        if streaming:
            streaming_state.update_tgt_mems(hids, qlen)
            output_dict['streaming_state'] = streaming_state

        return output_dict

    def init_stream(self):
        """Create a fresh StreamState on the same device/dtype as the model."""
        param = next(self.parameters())
        layers = self.layers
        streaming_state = StreamState(layers, self.max_memory_size, param.device, param.dtype)
        return streaming_state

    # make a simple sampling sequence from some input
    def sample(self, input):
        # Not implemented yet.
        return
| 9,715 | 36.513514 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/models/transformers.py | import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import defaultdict
from torch.utils.checkpoint import checkpoint
import onmt
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer, PositionalEncoding, \
PrePostProcessing
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from onmt.modules.dropout import embedded_dropout, switchout
from onmt.modules.linear import FeedForward, FeedForwardSwish
from onmt.reversible_models.transformers import ReversibleTransformerEncoderLayer, ReversibleEncoderFunction, \
ReversibleDecoderFunction, ReversibleTransformerDecoderLayer
from onmt.utils import flip, expected_length
torch_version = float(torch.__version__[:3])
class MixedEncoder(nn.Module):
    """Encoder wrapper that dispatches to a text or an audio encoder based on
    the rank of the input tensor (2-D = token indices, 3-D = audio features).
    """

    def __init__(self, text_encoder, audio_encoder):
        """
        :param text_encoder: encoder used for 2-D (batch x len) token input
        :param audio_encoder: encoder used for 3-D audio-feature input

        BUG FIX: the method was misspelled `__init` (not `__init__`), so it was
        never invoked as a constructor — instantiating with two arguments
        raised TypeError and nn.Module.__init__ was never called, which also
        broke submodule registration.
        """
        super(MixedEncoder, self).__init__()
        self.text_encoder = text_encoder
        self.audio_encoder = audio_encoder

    def forward(self, input, **kwargs):
        """
        Inputs Shapes:
            input: batch_size x len_src (to be transposed)

        Outputs Shapes:
            out: batch_size x len_src x d_model
            mask_src
        """

        """ Embedding: batch_size x len_src x d_model """
        # 2-D input is token indices -> text; anything else -> audio features.
        if input.dim() == 2:
            return self.text_encoder.forward(input)
        else:
            return self.audio_encoder.forward(input)
class TransformerEncoder(nn.Module):
    """Encoder in 'Attention is all you need'

    Absolute-position Transformer encoder supporting either text input
    (embedding lookup) or audio features (linear projection or a small CNN
    downsampling front-end), optional language embeddings, stochastic layer
    depth, and an optional reversible layer stack.

    Args:
        opt: list of options ( see train.py )
        dicts : dictionary (for source language)
    """

    def __init__(self, opt, embedding, positional_encoder, encoder_type='text', language_embeddings=None):
        # embedding: source word embedding table (used only for text input)
        # positional_encoder: sinusoidal/learned position module (also used as
        #   self.time_transformer)
        # encoder_type: 'text' or an audio mode
        # language_embeddings: optional per-language embedding table
        super(TransformerEncoder, self).__init__()

        self.opt = opt
        self.model_size = opt.model_size
        self.n_heads = opt.n_heads
        self.inner_size = opt.inner_size
        # encoder may have its own depth, falling back to the shared one
        if hasattr(opt, 'encoder_layers') and opt.encoder_layers != -1:
            self.layers = opt.encoder_layers
        else:
            self.layers = opt.layers
        self.dropout = opt.dropout
        self.word_dropout = opt.word_dropout
        self.attn_dropout = opt.attn_dropout
        self.emb_dropout = opt.emb_dropout
        self.input_type = encoder_type
        self.cnn_downsampling = opt.cnn_downsampling
        self.death_rate = opt.death_rate
        self.switchout = opt.switchout
        # (sic) attribute name keeps the original typo "varitional"
        self.varitional_dropout = opt.variational_dropout
        self.use_language_embedding = opt.use_language_embedding
        self.language_embedding_type = opt.language_embedding_type
        self.time = opt.time
        self.lsh_src_attention = opt.lsh_src_attention
        self.reversible = opt.src_reversible

        feature_size = opt.input_size
        self.channels = 1  # n. audio channels

        if opt.upsampling:
            feature_size = feature_size // 4

        if encoder_type != "text":
            if not self.cnn_downsampling:
                # plain linear projection from audio features to model size
                self.audio_trans = nn.Linear(feature_size, self.model_size)
                torch.nn.init.xavier_uniform_(self.audio_trans.weight)
            else:
                channels = self.channels  # should be 1
                # two stride-2 conv layers => 4x time downsampling
                if not opt.no_batch_norm:
                    cnn = [nn.Conv2d(channels, 32, kernel_size=(3, 3), stride=2), nn.ReLU(True), nn.BatchNorm2d(32),
                           nn.Conv2d(32, 32, kernel_size=(3, 3), stride=2), nn.ReLU(True), nn.BatchNorm2d(32)]
                else:
                    cnn = [nn.Conv2d(channels, 32, kernel_size=(3, 3), stride=2), nn.ReLU(True),
                           nn.Conv2d(32, 32, kernel_size=(3, 3), stride=2), nn.ReLU(True)]

                # flattened feature size after the two stride-2 convs
                feat_size = (((feature_size // channels) - 3) // 4) * 32
                self.audio_trans = nn.Sequential(*cnn)
                self.linear_trans = nn.Linear(feat_size, self.model_size)
                # assert self.model_size == feat_size, \
                #     "The model dimension doesn't match with the feature dim, expecting %d " % feat_size
        else:
            self.word_lut = embedding

        self.time_transformer = positional_encoder
        self.language_embedding = language_embeddings

        # 'd' = dropout on embeddings; 'n' = final layernorm
        self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d',
                                                  variational=self.varitional_dropout)

        self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')

        self.positional_encoder = positional_encoder

        self.layer_modules = nn.ModuleList()
        self.build_modules()

    def build_modules(self):
        """Instantiate the encoder layer stack (reversible or standard) with a
        linearly increasing stochastic-depth death rate."""
        e_length = expected_length(self.layers, self.death_rate)

        if self.reversible:
            print("* Reversible Transformer Encoder with Absolute Attention with %.2f expected layers" % e_length)
        else:
            print("* Transformer Encoder with Absolute Attention with %.2f expected layers" % e_length)

        for _l in range(self.layers):
            # linearly decay the death rate
            death_r = (_l + 1.0) / self.layers * self.death_rate

            if not self.reversible:
                block = EncoderLayer(self.opt, death_rate=death_r)
            else:
                block = ReversibleTransformerEncoderLayer(self.opt, death_rate=death_r)

            self.layer_modules.append(block)

    def forward(self, input, input_lang=None, **kwargs):
        """
        Inputs Shapes:
            input: batch_size x len_src (to be transposed)

        Outputs Shapes:
            out: batch_size x len_src x d_model
            mask_src
        """

        """ Embedding: batch_size x len_src x d_model """
        if self.input_type == "text":
            mask_src = input.eq(onmt.constants.SRC_PAD).unsqueeze(1)  # batch_size x 1 x len_src for broadcasting
            emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
        else:
            if not self.cnn_downsampling:
                # first feature channel carries the padding marker; strip it
                mask_src = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.SRC_PAD).unsqueeze(1)
                input = input.narrow(2, 1, input.size(2) - 1)
                emb = self.audio_trans(input.contiguous().view(-1, input.size(2))).view(input.size(0),
                                                                                        input.size(1), -1)
                emb = emb.type_as(input)
            else:
                long_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.SRC_PAD)
                input = input.narrow(2, 1, input.size(2) - 1)

                # first resizing to fit the CNN format
                input = input.view(input.size(0), input.size(1), -1, self.channels)
                input = input.permute(0, 3, 1, 2)

                input = self.audio_trans(input)
                input = input.permute(0, 2, 1, 3).contiguous()
                input = input.view(input.size(0), input.size(1), -1)
                input = self.linear_trans(input)

                # subsample the mask to match the CNN's 4x time downsampling
                mask_src = long_mask[:, 0:input.size(1) * 4:4].unsqueeze(1)
                # the size seems to be B x T ?
                emb = input

        mask_src = mask_src.bool()

        """ Scale the emb by sqrt(d_model) """
        emb = emb * math.sqrt(self.model_size)

        """ Adding positional encoding """
        emb = self.time_transformer(emb)

        """ Adding language embeddings """
        if self.use_language_embedding:
            assert self.language_embedding is not None
            if self.language_embedding_type in ['sum', 'all_sum']:
                lang_emb = self.language_embedding(input_lang)
                emb = emb + lang_emb.unsqueeze(1)

        # B x T x H -> T x B x H
        context = emb.transpose(0, 1)

        context = self.preprocess_layer(context)

        if self.reversible:
            # x_1 and x_2 are the same at first for reversible
            context = torch.cat([context, context], dim=-1)

            context = ReversibleEncoderFunction.apply(context, self.layer_modules, mask_src)
        else:
            for i, layer in enumerate(self.layer_modules):
                context = layer(context, mask_src)  # batch_size x len_src x d_model

        context = self.postprocess_layer(context)

        output_dict = {'context': context, 'src_mask': mask_src}

        # return context, mask_src
        return output_dict
class TransformerDecoder(nn.Module):
    """Decoder in 'Attention is all you need'

    Absolute-position Transformer decoder with optional cross-attention to an
    encoder context (disabled via ignore_source), optional language
    embeddings, stochastic layer depth, and an optional reversible stack.
    """

    def __init__(self, opt, embedding, positional_encoder,
                 language_embeddings=None, ignore_source=False, allocate_positions=True):
        """
        :param opt: option namespace (model_size, layers, dropout rates, ...)
        :param embedding: target word embedding table
        :param positional_encoder: positional encoding module
        :param language_embeddings: optional per-language embedding table
        :param ignore_source: build a decoder-only model (no cross-attention)
        :param allocate_positions: pre-allocate the triangular future mask
        """
        super(TransformerDecoder, self).__init__()

        opt.ignore_source = ignore_source

        self.opt = opt
        self.model_size = opt.model_size
        self.layers = opt.layers
        self.dropout = opt.dropout
        self.word_dropout = opt.word_dropout
        self.attn_dropout = opt.attn_dropout
        self.emb_dropout = opt.emb_dropout
        self.encoder_type = opt.encoder_type
        self.ignore_source = ignore_source
        self.encoder_cnn_downsampling = opt.cnn_downsampling
        self.variational_dropout = opt.variational_dropout
        self.switchout = opt.switchout
        self.death_rate = opt.death_rate
        self.time = opt.time
        self.use_language_embedding = opt.use_language_embedding
        self.language_embedding_type = opt.language_embedding_type
        self.reversible = opt.tgt_reversible

        self.time_transformer = positional_encoder

        # 'd' = dropout on embeddings; 'n' = final layernorm
        self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d',
                                                  variational=self.variational_dropout)
        self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')
        self.word_lut = embedding

        # Using feature embeddings in models
        self.language_embeddings = language_embeddings

        if self.language_embedding_type == 'concat':
            self.projector = nn.Linear(opt.model_size * 2, opt.model_size)

        self.positional_encoder = positional_encoder

        if allocate_positions:
            if hasattr(self.positional_encoder, 'len_max'):
                len_max = self.positional_encoder.len_max
                # upper-triangular future mask (1 = masked)
                mask = torch.ByteTensor(np.triu(np.ones((len_max, len_max)), k=1).astype('uint8'))
                self.register_buffer('mask', mask)

        self.layer_modules = nn.ModuleList()
        self.build_modules()

    def build_modules(self):
        """Instantiate the decoder layer stack (reversible or standard) with a
        linearly increasing stochastic-depth death rate."""
        e_length = expected_length(self.layers, self.death_rate)

        if self.reversible:
            print("* Reversible Transformer Decoder with Absolute Attention with %.2f expected layers" % e_length)
        else:
            print("* Transformer Decoder with Absolute Attention with %.2f expected layers" % e_length)

        for _l in range(self.layers):
            # linearly decay the death rate
            death_r = (_l + 1.0) / self.layers * self.death_rate

            if not self.reversible:
                block = DecoderLayer(self.opt, death_rate=death_r)
            else:
                # BUG FIX: was `death_rate=_l`, passing the integer layer index
                # instead of the computed death rate (the encoder's
                # build_modules correctly passes death_r).
                block = ReversibleTransformerDecoderLayer(self.opt, death_rate=death_r)

            self.layer_modules.append(block)

    def renew_buffer(self, new_len):
        """Re-allocate the positional encoder and future mask for longer
        sequences (used when decoding beyond the training length)."""
        self.positional_encoder.renew(new_len)
        mask = torch.ByteTensor(np.triu(np.ones((new_len + 1, new_len + 1)), k=1).astype('uint8'))
        self.register_buffer('mask', mask)

    def process_embedding(self, input, input_lang=None):
        """Embed target tokens: word dropout, sqrt(d) scaling, positional
        encoding, and optional language embedding (sum or concat+project)."""
        input_ = input

        emb = embedded_dropout(self.word_lut, input_, dropout=self.word_dropout if self.training else 0)
        if self.time == 'positional_encoding':
            emb = emb * math.sqrt(self.model_size)

        """ Adding positional encoding """
        emb = self.time_transformer(emb)

        if self.use_language_embedding:
            lang_emb = self.language_embeddings(input_lang)  # B x H or 1 x H
            if self.language_embedding_type == 'sum':
                emb = emb + lang_emb.unsqueeze(1)
            elif self.language_embedding_type == 'concat':
                lang_emb = lang_emb.unsqueeze(1).expand_as(emb)
                concat_emb = torch.cat([emb, lang_emb], dim=-1)
                emb = torch.relu(self.projector(concat_emb))
            else:
                raise NotImplementedError

        return emb

    def forward(self, input, context, src, tgt_lang=None, **kwargs):
        """
        Inputs Shapes:
            input: (Variable) batch_size x len_tgt (to be transposed)
            context: (Variable) batch_size x len_src x d_model
            mask_src (Tensor) batch_size x len_src

        Outputs Shapes:
            out: batch_size x len_tgt x d_model
            coverage: batch_size x len_tgt x len_src
        """

        """ Embedding: batch_size x len_tgt x d_model """
        emb = self.process_embedding(input, tgt_lang)

        # Build the source padding mask from the raw source tensor.
        if context is not None:
            if self.encoder_type == "audio":
                if not self.encoder_cnn_downsampling:
                    mask_src = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.SRC_PAD).unsqueeze(1)
                else:
                    long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.SRC_PAD)
                    # match the encoder CNN's 4x time downsampling
                    mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
            else:
                mask_src = src.data.eq(onmt.constants.SRC_PAD).unsqueeze(1)
        else:
            mask_src = None

        len_tgt = input.size(1)
        # mask_tgt = input.eq(onmt.constants.PAD).byte().unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
        # mask_tgt = torch.gt(mask_tgt, 0)
        mask_tgt = torch.triu(
            emb.new_ones(len_tgt, len_tgt), diagonal=1).byte().unsqueeze(0)
        mask_tgt = mask_tgt.bool()

        output = self.preprocess_layer(emb.transpose(0, 1).contiguous())

        if self.reversible:
            # x_1 and x_2 are the same at first for reversible
            output = torch.cat([output, output], dim=-1)

            output = ReversibleDecoderFunction.apply(output, context, self.layer_modules,
                                                     mask_tgt, mask_src)
            coverage = None
        else:
            for i, layer in enumerate(self.layer_modules):
                output, coverage, _ = layer(output, context, mask_tgt, mask_src)  # batch_size x len_src x d_model

        # From Google T2T: normalization to control network output magnitude
        output = self.postprocess_layer(output)

        output_dict = defaultdict(lambda: None, {'hidden': output, 'coverage': coverage, 'context': context})

        # return output, None
        return output_dict

    def step(self, input, decoder_state, **kwargs):
        """
        Inputs Shapes:
            input: (Variable) batch_size x len_tgt (to be transposed)
            context: (Variable) batch_size x len_src x d_model
            mask_src (Tensor) batch_size x len_src
            buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing

        Outputs Shapes:
            out: batch_size x len_tgt x d_model
            coverage: batch_size x len_tgt x len_src
        """
        context = decoder_state.context
        buffers = decoder_state.attention_buffers
        lang = decoder_state.tgt_lang
        # mask_src = decoder_state.src_mask

        if decoder_state.concat_input_seq:
            if decoder_state.input_seq is None:
                decoder_state.input_seq = input
            else:
                # concatenate the last input to the previous input sequence
                decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
            input = decoder_state.input_seq.transpose(0, 1)

        src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None

        # only embed the most recent token
        if input.size(1) > 1:
            input_ = input[:, -1].unsqueeze(1)
        else:
            input_ = input

        """ Embedding: batch_size x 1 x d_model """
        emb = self.word_lut(input_)

        """ Adding positional encoding """
        emb = emb * math.sqrt(self.model_size)
        emb = self.time_transformer(emb, t=input.size(1))

        # emb should be batch_size x 1 x dim
        # (the duplicated nested `if self.use_language_embedding:` was collapsed)
        if self.use_language_embedding:
            lang_emb = self.language_embeddings(lang)  # B x H or 1 x H
            if self.language_embedding_type == 'sum':
                emb = emb + lang_emb
            elif self.language_embedding_type == 'concat':
                # replace the bos embedding with the language
                if input.size(1) == 1:
                    bos_emb = lang_emb.expand_as(emb[:, 0, :])
                    emb[:, 0, :] = bos_emb

                lang_emb = lang_emb.unsqueeze(1).expand_as(emb)
                concat_emb = torch.cat([emb, lang_emb], dim=-1)
                emb = torch.relu(self.projector(concat_emb))
            else:
                raise NotImplementedError

        emb = emb.transpose(0, 1)

        # batch_size x 1 x len_src
        if context is not None:
            if self.encoder_type == "audio":
                if src.dim() == 3:
                    if self.encoder_cnn_downsampling:
                        long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.SRC_PAD)
                        mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
                    else:
                        mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.SRC_PAD).unsqueeze(1)
                elif self.encoder_cnn_downsampling:
                    long_mask = src.eq(onmt.constants.SRC_PAD)
                    mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
                else:
                    mask_src = src.eq(onmt.constants.SRC_PAD).unsqueeze(1)
            else:
                mask_src = src.eq(onmt.constants.SRC_PAD).unsqueeze(1)
        else:
            mask_src = None

        len_tgt = input.size(1)
        mask_tgt = torch.triu(
            emb.new_ones(len_tgt, len_tgt), diagonal=1).byte().unsqueeze(0)
        # only get the final step of the mask during decoding (because the input of the network is only the last step)
        mask_tgt = mask_tgt[:, -1, :].unsqueeze(1)

        if torch_version >= 1.2:
            mask_tgt = mask_tgt.bool()

        output = emb.contiguous()

        if self.reversible:
            # x_1 and x_2 are the same at first for reversible
            # output = torch.cat([output, output], dim=-1)
            output1, output2 = output, output

        for i, layer in enumerate(self.layer_modules):
            buffer = buffers[i] if i in buffers else None
            assert (output.size(0) == 1)

            if self.reversible:
                output1, output2, coverage, buffer = layer(output1, output2, context, mask_tgt, mask_src,
                                                           incremental=True, incremental_cache=buffer)
            else:
                output, coverage, buffer = layer(output, context, mask_tgt, mask_src,
                                                 incremental=True, incremental_cache=buffer)

            decoder_state.update_attention_buffer(buffer, i)

        if self.reversible:
            output = output1 + output2

        output = self.postprocess_layer(output)

        output_dict = defaultdict(lambda: None)
        output_dict['hidden'] = output
        output_dict['coverage'] = coverage
        output_dict['context'] = context

        return output_dict
class Transformer(NMTModel):
"""Main model in 'Attention is all you need' """
def __init__(self, encoder, decoder, generator=None, rec_decoder=None, rec_generator=None,
mirror=False, ctc=False):
super().__init__(encoder, decoder, generator, rec_decoder, rec_generator, ctc=ctc)
self.model_size = self.decoder.model_size
self.switchout = self.decoder.switchout
if hasattr(self.decoder, 'word_lut'):
self.tgt_vocab_size = self.decoder.word_lut.weight.size(0)
if self.encoder.input_type == 'text':
self.src_vocab_size = self.encoder.word_lut.weight.size(0)
else:
self.src_vocab_size = 0
if mirror:
self.mirror_decoder = copy.deepcopy(self.decoder)
self.mirror_g = nn.Linear(decoder.model_size, decoder.model_size)
self.mirror_generator = copy.deepcopy(self.generator)
self.mirror_generator[0].linear.weight = self.decoder.word_lut.weight
if self.reconstruct:
self.rec_linear = nn.Linear(decoder.model_size, decoder.model_size)
if self.ctc:
self.ctc_linear = nn.Linear(encoder.model_size, self.tgt_vocab_size)
def reset_states(self):
return
def forward(self, batch, target_mask=None, streaming=False, zero_encoder=False,
mirror=False, streaming_state=None, nce=False, factorize=True,
pretrained_layer_states=None, **kwargs):
"""
:param pretrained_layer_states:
:param nce: use noise contrastive estimation
:param streaming_state:
:param streaming:
:param mirror: if using mirror network for future anticipation
:param batch: data object sent from the dataset
:param target_mask:
:param zero_encoder: zero out the encoder output (if necessary)
:return:
"""
if self.switchout > 0 and self.training:
batch.switchout(self.switchout, self.src_vocab_size, self.tgt_vocab_size)
src = batch.get('source')
tgt = batch.get('target_input')
src_pos = batch.get('source_pos')
tgt_pos = batch.get('target_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_lengths = batch.src_lengths
tgt_lengths = batch.tgt_lengths
org_src = src
org_tgt = tgt
src = src.transpose(0, 1) # transpose to have batch first
tgt = tgt.transpose(0, 1)
encoder_output = self.encoder(src, input_pos=src_pos, input_lang=src_lang, streaming=streaming,
src_lengths=src_lengths, streaming_state=streaming_state, factorize=factorize,
pretrained_layer_states=pretrained_layer_states)
encoder_output = defaultdict(lambda: None, encoder_output)
context = encoder_output['context']
# the state is changed if streaming
streaming_state = encoder_output['streaming_state']
# zero out the encoder part for pre-training
if zero_encoder:
context.zero_()
decoder_output = self.decoder(tgt, context, src,
src_lang=src_lang, tgt_lang=tgt_lang, input_pos=tgt_pos, streaming=streaming,
src_lengths=src_lengths, tgt_lengths=tgt_lengths,
streaming_state=streaming_state, factorize=factorize)
# update the streaming state again
decoder_output = defaultdict(lambda: None, decoder_output)
streaming_state = decoder_output['streaming_state']
output = decoder_output['hidden']
# build the output dict based on decoder output
output_dict = defaultdict(lambda: None, decoder_output)
output_dict['hidden'] = output
output_dict['context'] = context
output_dict['src_mask'] = encoder_output['src_mask']
output_dict['src'] = src
output_dict['target_mask'] = target_mask
output_dict['streaming_state'] = streaming_state
output_dict['target'] = batch.get('target_output')
# output_dict['lid_logits'] = decoder_output['lid_logits']
# final layer: computing softmax
if self.training and nce:
output_dict = self.generator[0](output_dict)
else:
logprobs = self.generator[0](output_dict)['logits']
output_dict['logprobs'] = logprobs
# Mirror network: reverse the target sequence and perform backward language model
if mirror:
# tgt_reverse = torch.flip(batch.get('target_input'), (0, ))
tgt_pos = torch.flip(batch.get('target_pos'), (0,))
tgt_reverse = torch.flip(batch.get('target'), (0,))
tgt_reverse_input = tgt_reverse[:-1]
tgt_reverse_output = tgt_reverse[1:]
tgt_reverse_input = tgt_reverse_input.transpose(0, 1)
# perform an additional backward pass
reverse_decoder_output = self.mirror_decoder(tgt_reverse_input, context, src, src_lang=src_lang,
tgt_lang=tgt_lang, input_pos=tgt_pos)
reverse_decoder_output['src'] = src
reverse_decoder_output['context'] = context
reverse_decoder_output['target_mask'] = target_mask
reverse_logprobs = self.mirror_generator[0](reverse_decoder_output)['logits']
output_dict['reverse_target'] = tgt_reverse_output
output_dict['reverse_hidden'] = reverse_decoder_output['hidden']
output_dict['reverse_logprobs'] = reverse_logprobs
output_dict['target_input'] = batch.get('target_input')
output_dict['target_lengths'] = batch.tgt_lengths
# learn weights for mapping (g in the paper)
output_dict['hidden'] = self.mirror_g(output_dict['hidden'])
# Reconstruction network
if self.reconstruct:
bos = org_tgt[0].unsqueeze(0) # 1 x B
src_input = torch.cat([bos, org_src[:-1]], dim=0) # T x B
src_output = org_src
src_input = src_input.transpose(0, 1)
rec_context = self.rec_linear(output_dict['hidden']) # T x B x H
rec_decoder_output = self.rec_decoder(src_input, rec_context, tgt, tgt_lang=src_lang, input_pos=src_pos)
rec_output = rec_decoder_output['hidden']
rec_logprobs = self.rec_generator[0](rec_decoder_output)['logits']
output_dict['rec_logprobs'] = rec_logprobs
output_dict['rec_hidden'] = rec_output
output_dict['reconstruct'] = True
output_dict['rec_target'] = src_output
else:
output_dict['reconstruct'] = False
# compute the logits for each encoder step
if self.ctc:
output_dict['encoder_logits'] = self.ctc_linear(output_dict['context'])
return output_dict
def load_encoder_weights(self, pretrained_model):
pretrained_model.encoder.language_embedding = None
enc_language_embedding = self.encoder.language_embedding
self.encoder.language_embedding = None
encoder_state_dict = pretrained_model.encoder.state_dict()
self.encoder.load_state_dict(encoder_state_dict)
self.encoder.language_embedding = enc_language_embedding
def decode(self, batch, pretrained_layer_states=None):
"""
:param batch: (onmt.Dataset.Batch) an object containing tensors needed for training
:return: gold_scores (torch.Tensor) log probs for each sentence
gold_words (Int) the total number of non-padded tokens
allgold_scores (list of Tensors) log probs for each word in the sentence
"""
src = batch.get('source')
src_pos = batch.get('source_pos')
tgt_input = batch.get('target_input')
tgt_output = batch.get('target_output')
tgt_pos = batch.get('target_pos')
# tgt_atb = batch.get('target_atb') # a dictionary of attributes
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
# transpose to have batch first
src = src.transpose(0, 1)
tgt_input = tgt_input.transpose(0, 1)
batch_size = tgt_input.size(0)
context = self.encoder(src, input_pos=src_pos, input_lang=src_lang,
pretrained_layer_states=pretrained_layer_states)['context']
if hasattr(self, 'autoencoder') and self.autoencoder \
and self.autoencoder.representation == "EncoderHiddenState":
context = self.autoencoder.autocode(context)
gold_scores = context.new(batch_size).zero_()
gold_words = 0
allgold_scores = list()
decoder_output = self.decoder(tgt_input, context, src, tgt_lang=tgt_lang, src_lang=src_lang,
input_pos=tgt_pos)['hidden']
output = decoder_output
if hasattr(self, 'autoencoder') and self.autoencoder and \
self.autoencoder.representation == "DecoderHiddenState":
output = self.autoencoder.autocode(output)
for dec_t, tgt_t in zip(output, tgt_output):
dec_out = defaultdict(lambda: None)
dec_out['hidden'] = dec_t.unsqueeze(0)
dec_out['src'] = src
dec_out['context'] = context
if isinstance(self.generator, nn.ModuleList):
dec_out = self.generator[0](dec_out)
# gen_t = self.generator[0](dec_out)['logits']
else:
dec_out = self.generator(dec_out)
gen_t = dec_out['logits']
if dec_out['softmaxed'] is False:
gen_t = F.log_softmax(gen_t, dim=-1, dtype=torch.float32)
gen_t = gen_t.squeeze(0)
tgt_t = tgt_t.unsqueeze(1)
scores = gen_t.gather(1, tgt_t)
scores.masked_fill_(tgt_t.eq(onmt.constants.TGT_PAD), 0)
gold_scores += scores.squeeze(1).type_as(gold_scores)
gold_words += tgt_t.ne(onmt.constants.TGT_PAD).sum().item()
allgold_scores.append(scores.squeeze(1).type_as(gold_scores))
return gold_words, gold_scores, allgold_scores
def renew_buffer(self, new_len):
self.decoder.renew_buffer(new_len)
def step(self, input_t, decoder_state, streaming=False):
"""
Decoding function:
generate new decoder output based on the current input and current decoder state
the decoder state is updated in the process
:param streaming:
:param input_t: the input word index at time t
:param decoder_state: object DecoderState containing the buffers required for decoding
:return: a dictionary containing: log-prob output and the attention coverage
"""
output_dict = self.decoder.step(input_t, decoder_state, streaming=streaming)
output_dict['src'] = decoder_state.src.transpose(0, 1)
# squeeze to remove the time step dimension
if isinstance(self.generator, nn.ModuleList):
output_dict = self.generator[0](output_dict)
else:
output_dict = self.generator(output_dict)
log_prob = output_dict['logits'].squeeze(0)
# the key 'softmaxed' should be included in generators.
# The 'normal linear + CE' doesn't need softmax
if output_dict['softmaxed'] is False:
log_prob = F.log_softmax(log_prob, dim=-1, dtype=torch.float32)
coverage = output_dict['coverage']
try:
last_coverage = coverage[:, -1, :].squeeze(1)
except TypeError:
last_coverage = None
output_dict['log_prob'] = log_prob
output_dict['coverage'] = last_coverage
return output_dict
def create_decoder_state(self, batch, beam_size=1, type=1, buffering=True,
                         pretrained_classifier=None, pretrained_layer_states=None, **kwargs):
    """
    Generate a new decoder state based on the batch input
    :param pretrained_classifier: model to create mixtures
    :param buffering:
    :param type:
    :param batch: Batch object (may not contain target during decoding)
    :param beam_size: Size of beam used in beam search
    :return: a TransformerDecodingState wrapping the encoder context for beam search
    """
    src = batch.get('source')
    src_pos = batch.get('source_pos')
    tgt_atb = batch.get('target_atb')
    src_lang = batch.get('source_lang')
    tgt_lang = batch.get('target_lang')
    # the encoder consumes batch-first input; src is stored time-first
    src_transposed = src.transpose(0, 1)
    if pretrained_classifier is not None:
        # NOTE(review): `mixture` is computed but never used in this method —
        # confirm whether it should be forwarded to the encoder or decoder state.
        mixture = pretrained_classifier(src_transposed)
    encoder_output = self.encoder(src_transposed, input_pos=src_pos, input_lang=src_lang, atb=tgt_atb,
                                  pretrained_layer_states=pretrained_layer_states)
    print("[INFO] create Transformer decoding state with buffering", buffering)
    decoder_state = TransformerDecodingState(src, tgt_lang, encoder_output['context'], src_lang,
                                             beam_size=beam_size, model_size=self.model_size,
                                             type=type, buffering=buffering, tgt_atb=tgt_atb)
    return decoder_state
def init_stream(self):
    """Streaming hook; this model needs no per-stream setup, so it is a no-op."""
    return None
def set_memory_size(self, src_memory_size, tgt_memory_size):
    """Streaming-memory hook; this model keeps no bounded memory, so it is a no-op."""
    return None
class TransformerDecodingState(DecoderState):
    """Per-batch state carried through incremental (beam-search) decoding.

    Holds the replicated source/context tensors (one copy per beam hypothesis)
    and the per-layer attention key/value buffers that are updated in place as
    decoding advances.
    """

    def __init__(self, src, tgt_lang, context, src_lang, beam_size=1, model_size=512, type=2,
                 cloning=True, buffering=False, src_mask=None, tgt_atb=None,
                 dec_pretrained_model="", ):
        """
        :param src:
        :param tgt_lang:
        :param context:
        :param src_lang:
        :param beam_size:
        :param model_size:
        :param type: Type 1 is for old translation code. Type 2 is for fast buffering. (Type 2 default).
        :param cloning:
        :param buffering:
        """
        self.beam_size = beam_size
        self.model_size = model_size
        # layer index -> {'k': ..., 'v': ...} incremental attention buffers
        self.attention_buffers = dict()
        self.buffering = buffering
        self.dec_pretrained_model = dec_pretrained_model
        self.tgt_atb = tgt_atb
        if type == 1:
            # if audio only take one dimension since only used for mask
            raise NotImplementedError
            # self.original_src = src  # TxBxC
            # self.concat_input_seq = True
            #
            # if src is not None:
            #     if src.dim() == 3:
            #         # print(self.src.size())
            #         self.src = src.narrow(2, 0, 1).squeeze(2).repeat(1, beam_size)
            #         # self.src = src.repeat(1, beam_size, 1)
            #         # print(self.src.size())
            #         # self.src = src.repeat(1, beam_size, 1) # T x Bb x c
            #     else:
            #         self.src = src.repeat(1, beam_size)
            # else:
            #     self.src = None
            #
            # if context is not None:
            #     self.context = context.repeat(1, beam_size, 1)
            # else:
            #     self.context = None
            #
            # self.input_seq = None
            # self.src_lang = src_lang
            # self.tgt_lang = tgt_lang
        elif type == 2:
            bsz = src.size(1)  # src is T x B
            # expand batch index i into beam_size consecutive copies: [0,0,...,1,1,...]
            new_order = torch.arange(bsz).view(-1, 1).repeat(1, self.beam_size).view(-1)
            new_order = new_order.to(src.device)
            if cloning:
                self.src = src.index_select(1, new_order)  # because src is time first
                if context is not None:
                    self.context = context.index_select(1, new_order)
                else:
                    self.context = None
                if src_mask is not None:
                    self.src_mask = src_mask.index_select(0, new_order)
                else:
                    self.src_mask = None
            else:
                self.context = context
                self.src = src
                # self.src_mask = src_mask
            self.concat_input_seq = False
            self.tgt_lang = tgt_lang
            self.src_lang = src_lang
        else:
            raise NotImplementedError

    def update_attention_buffer(self, buffer, layer):
        # store this layer's incremental key/value cache
        self.attention_buffers[layer] = buffer  # dict of 2 keys (k, v) : T x B x H

    def update_beam(self, beam, b, remaining_sents, idx):
        """Reorder hypotheses of sentence `idx` in-place to follow beam backpointers."""
        if self.beam_size == 1:
            return
        # NOTE(review): self.input_seq is only assigned in the (disabled) type-1
        # path — confirm it is set elsewhere before this method runs.
        for tensor in [self.src, self.input_seq]:
            if tensor is None:
                continue
            t_, br = tensor.size()
            sent_states = tensor.view(t_, self.beam_size, remaining_sents)[:, :, idx]
            # in-place copy so the underlying storage keeps the reordered beams
            sent_states.copy_(sent_states.index_select(
                1, beam[b].getCurrentOrigin()))
        for l in self.attention_buffers:
            buffer_ = self.attention_buffers[l]
            if buffer_ is None:
                continue
            for k in buffer_:
                t_, br_, d_ = buffer_[k].size()
                sent_states = buffer_[k].view(t_, self.beam_size, remaining_sents, d_)[:, :, idx, :]
                sent_states.data.copy_(sent_states.data.index_select(
                    1, beam[b].getCurrentOrigin()))

    # in this section, the sentences that are still active are
    # compacted so that the decoder is not run on completed sentences
    def prune_complete_beam(self, active_idx, remaining_sents):
        model_size = self.model_size

        def update_active_with_hidden(t):
            # keep only active sentences for a tensor with a trailing hidden dim
            if t is None:
                return t
            dim = t.size(-1)
            # select only the remaining active sentences
            view = t.data.view(-1, remaining_sents, dim)
            new_size = list(t.size())
            new_size[-2] = new_size[-2] * len(active_idx) // remaining_sents
            return view.index_select(1, active_idx).view(*new_size)

        def update_active_without_hidden(t):
            # same selection for 2-D tensors (time x batch*beam), no hidden dim
            if t is None:
                return t
            view = t.view(-1, remaining_sents)
            new_size = list(t.size())
            new_size[-1] = new_size[-1] * len(active_idx) // remaining_sents
            new_t = view.index_select(1, active_idx).view(*new_size)
            return new_t

        self.context = update_active_with_hidden(self.context)
        self.input_seq = update_active_without_hidden(self.input_seq)
        if self.src.dim() == 2:
            self.src = update_active_without_hidden(self.src)
        elif self.src.dim() == 3:
            t = self.src
            dim = t.size(-1)
            view = t.view(-1, remaining_sents, dim)
            new_size = list(t.size())
            new_size[-2] = new_size[-2] * len(active_idx) // remaining_sents
            new_t = view.index_select(1, active_idx).view(*new_size)
            self.src = new_t
        for l in self.attention_buffers:
            buffer_ = self.attention_buffers[l]
            for k in buffer_:
                buffer_[k] = update_active_with_hidden(buffer_[k])

    # For the new decoder version only
    def _reorder_incremental_state(self, reorder_state):
        """Reorder all cached tensors along the beam/batch dim per `reorder_state`."""
        if self.context is not None:
            self.context = self.context.index_select(1, reorder_state)
        if self.src_mask is not None:
            self.src_mask = self.src_mask.index_select(0, reorder_state)
        self.src = self.src.index_select(1, reorder_state)
        for l in self.attention_buffers:
            buffer_ = self.attention_buffers[l]
            if buffer_ is not None:
                for k in buffer_.keys():
                    t_, br_, d_ = buffer_[k].size()
                    buffer_[k] = buffer_[k].index_select(1, reorder_state)
                    # if not self.dec_pretrained_model:
                    #     buffer_[k] = buffer_[k].index_select(1, reorder_state)  # beam/batch is the 2nd dim
                    # elif self.dec_pretrained_model in ["bert", "roberta", "bart"]:
                    #     buffer_[k] = buffer_[k].index_select(0, reorder_state)  # beam/batch is the first dim
                    # elif self.dec_pretrained_model in ["mbart", "mbart50"]:
                    #     buffer_[k] = buffer_[k].index_select(1, reorder_state)  # beam/batch is the 2nd dim
                    # else:
                    #     print("Warning: check dec_pretrained_model type")
                    #     raise NotImplementedError
| 40,743 | 39.18146 | 118 | py |
NMTGMinor | NMTGMinor-master/onmt/models/performer_layer.py | import torch
def softmax_kernel(data, *, projection_matrix, is_query, normalize_data=True, eps=1e-4, device=None):
    """Positive random-feature map approximating the softmax kernel (FAVOR+).

    :param data: attention queries or keys; first two dims are batch and heads
    :param projection_matrix: random projection of shape (nb_features, dim_head)
    :param is_query: queries are stabilized per-row, keys with a global max
    :param normalize_data: scale inputs by d**-0.25 before projecting
    :param eps: floor added to keep features strictly positive
    """
    b, h, *_ = data.shape
    data_normalizer = (data.shape[-1] ** -0.25) if normalize_data else 1.
    ratio = (projection_matrix.shape[0] ** -0.5)
    # broadcast the shared projection across batch and heads
    projection = repeat(projection_matrix, 'j d -> b h j d', b=b, h=h)
    projection = projection.type_as(data)
    data_dash = torch.einsum('...id,...jd->...ij', (data_normalizer * data), projection)
    # ||x||^2 / 2 term of exp(w.x - ||x||^2/2) in the scaled space
    diag_data = data ** 2
    diag_data = torch.sum(diag_data, dim=-1)
    diag_data = (diag_data / 2.0) * (data_normalizer ** 2)
    diag_data = diag_data.unsqueeze(dim=-1)
    if is_query:
        # subtract per-row max before exp for numerical stability
        data_dash = ratio * (
            torch.exp(data_dash - diag_data -
                      torch.max(data_dash, dim=-1, keepdim=True).values) + eps)
    else:
        # keys share one global max so the normalization cancels in attention
        data_dash = ratio * (
            torch.exp(data_dash - diag_data - torch.max(data_dash)) + eps)
    return data_dash.type_as(data)
def generalized_kernel(data, *, projection_matrix, kernel_fn=nn.ReLU(), kernel_epsilon=0.001, normalize_data=True,
                       device=None):
    """Generic Performer feature map: project the (optionally normalized) input
    and pass it through `kernel_fn`, adding a small epsilon to keep features
    bounded away from zero. With no projection matrix the kernel is applied
    directly to the normalized input.
    """
    batch, heads = data.shape[0], data.shape[1]
    scale = (data.shape[-1] ** -0.25) if normalize_data else 1.
    scaled = scale * data
    if projection_matrix is None:
        return kernel_fn(scaled) + kernel_epsilon
    # broadcast the shared projection over batch and heads, matching data's dtype
    proj = repeat(projection_matrix, 'j d -> b h j d', b=batch, h=heads).type_as(data)
    features = kernel_fn(torch.einsum('...id,...jd->...ij', scaled, proj)) + kernel_epsilon
    return features.type_as(data)
def orthogonal_matrix_chunk(cols, device=None):
    """Sample a (cols x cols) matrix with orthonormal rows via QR of a Gaussian block.

    Fixed: uses torch.linalg.qr, the supported replacement for the deprecated
    (and since removed) torch.qr; mode='reduced' matches the old some=True.
    The factorization is done on CPU (as before) and moved back to `device`.
    """
    unstructured_block = torch.randn((cols, cols), device=device)
    q, r = torch.linalg.qr(unstructured_block.cpu(), mode='reduced')
    q, r = map(lambda t: t.to(device), (q, r))
    # Q has orthonormal columns, so Q^T has orthonormal rows
    return q.t()
def gaussian_orthogonal_random_matrix(nb_rows, nb_columns, scaling=0, device=None):
    """Build an (nb_rows x nb_columns) matrix of stacked orthogonal blocks
    ("nb" = number of), each row rescaled to a Gaussian-like norm.

    scaling == 0: rows scaled by norms of Gaussian samples;
    scaling == 1: all rows scaled by sqrt(nb_columns).
    """
    full_blocks, leftover = divmod(nb_rows, nb_columns)
    blocks = [orthogonal_matrix_chunk(nb_columns, device=device) for _ in range(full_blocks)]
    if leftover > 0:
        # draw one more full block and keep only the rows still needed
        extra = orthogonal_matrix_chunk(nb_columns, device=device)
        blocks.append(extra[:leftover])
    stacked = torch.cat(blocks)
    if scaling == 0:
        row_scales = torch.randn((nb_rows, nb_columns), device=device).norm(dim=1)
    elif scaling == 1:
        row_scales = math.sqrt(float(nb_columns)) * torch.ones((nb_rows,), device=device)
    else:
        raise ValueError(f'Invalid scaling {scaling}')
    return torch.diag(row_scales) @ stacked
def linear_attention(q, k, v):
    """Non-causal linear (kernelized) attention in O(n).

    Aggregates key/value products once per head and normalizes each query by
    its total key mass, instead of forming the full n x n attention matrix.
    """
    key_totals = k.sum(dim=-2).type_as(q)
    # per-query normalizer: 1 / (q . sum_of_keys)
    normalizer = (q * key_totals.unsqueeze(-2)).sum(dim=-1).reciprocal()
    # (d x e) key/value summary per head
    kv_summary = k.transpose(-2, -1) @ v
    return (q @ kv_summary) * normalizer.unsqueeze(-1)
class FastAttention(nn.Module):
    """Performer attention core: maps q/k through a random-feature kernel and
    applies linear attention with a fixed (re-drawable) projection matrix.
    """

    def __init__(self, dim_heads, nb_features=None, ortho_scaling=0, causal=False, generalized_attention=False,
                 kernel_fn=nn.ReLU(), no_projection=False):
        super().__init__()
        # default feature count: d * log(d), per the Performer paper's recipe
        nb_features = default(nb_features, int(dim_heads * math.log(dim_heads)))
        self.dim_heads = dim_heads
        self.nb_features = nb_features
        self.ortho_scaling = ortho_scaling
        # keep a factory so the projection can be re-sampled later on any device
        self.create_projection = partial(gaussian_orthogonal_random_matrix, nb_rows=self.nb_features,
                                         nb_columns=dim_heads, scaling=ortho_scaling)
        projection_matrix = self.create_projection()
        self.register_buffer('projection_matrix', projection_matrix)
        self.generalized_attention = generalized_attention
        self.kernel_fn = kernel_fn
        # if this is turned on, no projection will be used
        # queries and keys will be softmax-ed as in the original efficient attention paper
        self.no_projection = no_projection
        self.causal = causal
        if causal:
            try:
                import fast_transformers.causal_product.causal_product_cuda
                self.causal_linear_fn = partial(causal_linear_attention)
            except ImportError:
                print(
                    'unable to import cuda code for auto-regressive Performer. will default to the memory inefficient non-cuda version')
                self.causal_linear_fn = causal_linear_attention_noncuda

    @torch.no_grad()
    def redraw_projection_matrix(self, device):
        """Resample the random projection in-place (buffer keeps its identity)."""
        projections = self.create_projection(device=device)
        self.projection_matrix.copy_(projections)
        del projections

    def forward(self, q, k, v):
        device = q.device
        if self.no_projection:
            # softmax features instead of random projections
            q = q.softmax(dim=-1)
            k = torch.exp(k) if self.causal else k.softmax(dim=-2)
        elif self.generalized_attention:
            create_kernel = partial(generalized_kernel, kernel_fn=self.kernel_fn,
                                    projection_matrix=self.projection_matrix, device=device)
            q, k = map(create_kernel, (q, k))
        else:
            # FAVOR+ softmax-kernel features; queries/keys are stabilized differently
            create_kernel = partial(softmax_kernel, projection_matrix=self.projection_matrix, device=device)
            q = create_kernel(q, is_query=True)
            k = create_kernel(k, is_query=False)
        attn_fn = linear_attention if not self.causal else self.causal_linear_fn
        out = attn_fn(q, k, v)
        return out
class ProjectionUpdater(nn.Module):
    """Tracks training calls on a wrapped model and periodically redraws the
    random projection matrices of all its FastAttention submodules.

    :param instance: the model whose FastAttention projections are managed
    :param feature_redraw_interval: redraw every N calls; None disables redrawing
    """

    def __init__(self, instance, feature_redraw_interval):
        super().__init__()
        self.instance = instance
        self.feature_redraw_interval = feature_redraw_interval
        # buffer so the counter follows the module across devices/checkpoints
        self.register_buffer('calls_since_last_redraw', torch.tensor(0))

    def fix_projections_(self):
        """Permanently disable redrawing, freezing the current projections."""
        self.feature_redraw_interval = None

    def redraw_projections(self):
        """Redraw all FastAttention projections once the interval has elapsed."""
        model = self.instance
        if not self.training:
            return
        if exists(self.feature_redraw_interval) and self.calls_since_last_redraw >= self.feature_redraw_interval:
            device = get_module_device(model)
            fast_attentions = find_modules(model, FastAttention)
            for fast_attention in fast_attentions:
                fast_attention.redraw_projection_matrix(device)
            self.calls_since_last_redraw.zero_()
            return
        self.calls_since_last_redraw += 1

    def forward(self, x):
        # This wrapper is not meant to be called as a layer.
        # Fixed: `raise NotImplemented` raised the NotImplemented *constant*
        # (which produces a TypeError at runtime), not the intended exception.
        raise NotImplementedError
class Attention(nn.Module):
    """Performer multi-head attention: the first `heads - local_heads` heads use
    FastAttention (global, kernelized), the remaining heads use windowed
    LocalAttention; their outputs are concatenated and projected back to `dim`.
    """

    def __init__(
            self,
            dim,
            causal=False,
            heads=8,
            dim_head=64,
            local_heads=0,
            local_window_size=256,
            nb_features=None,
            feature_redraw_interval=1000,
            generalized_attention=False,
            kernel_fn=nn.ReLU(),
            dropout=0.,
            no_projection=False,
            qkv_bias=False,
            attn_out_bias=True
    ):
        super().__init__()
        assert dim % heads == 0, 'dimension must be divisible by number of heads'
        dim_head = default(dim_head, dim // heads)
        inner_dim = dim_head * heads
        self.fast_attention = FastAttention(dim_head, nb_features, causal=causal,
                                            generalized_attention=generalized_attention, kernel_fn=kernel_fn,
                                            no_projection=no_projection)
        self.heads = heads
        # global heads = total heads minus the locally-attending ones
        self.global_heads = heads - local_heads
        self.local_attn = LocalAttention(window_size=local_window_size, causal=causal, autopad=True, dropout=dropout,
                                         look_forward=int(not causal),
                                         rel_pos_emb_config=(dim_head, local_heads)) if local_heads > 0 else None
        self.to_q = nn.Linear(dim, inner_dim, bias=qkv_bias)
        self.to_k = nn.Linear(dim, inner_dim, bias=qkv_bias)
        self.to_v = nn.Linear(dim, inner_dim, bias=qkv_bias)
        self.to_out = nn.Linear(inner_dim, dim, bias=attn_out_bias)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, pos_emb=None, context=None, mask=None, context_mask=None, **kwargs):
        """
        :param x: queries, shaped (batch, seq, dim)
        :param pos_emb: rotary position embedding, applied only for self-attention
        :param context: optional cross-attention memory; defaults to x (self-attention)
        :param mask: padding mask over x
        :param context_mask: padding mask over context (falls back to `mask` for self-attn)
        """
        b, n, _, h, gh = *x.shape, self.heads, self.global_heads
        cross_attend = exists(context)
        context = default(context, x)
        context_mask = default(context_mask, mask) if not cross_attend else context_mask
        q, k, v = self.to_q(x), self.to_k(context), self.to_v(context)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))
        # split heads: first gh go to fast (global) attention, the rest to local
        (q, lq), (k, lk), (v, lv) = map(lambda t: (t[:, :gh], t[:, gh:]), (q, k, v))
        attn_outs = []
        if not empty(q):
            if exists(context_mask):
                # masking values to zero removes padded positions from the
                # linear-attention key/value sums
                global_mask = context_mask[:, None, :, None]
                v.masked_fill_(~global_mask, 0.)
            if exists(pos_emb) and not cross_attend:
                q, k = apply_rotary_pos_emb(q, k, pos_emb)
            out = self.fast_attention(q, k, v)
            attn_outs.append(out)
        if not empty(lq):
            assert not cross_attend, 'local attention is not compatible with cross attention'
            out = self.local_attn(lq, lk, lv, input_mask=mask)
            attn_outs.append(out)
        # re-merge global and local heads
        out = torch.cat(attn_outs, dim=1)
        out = rearrange(out, 'b h n d -> b n (h d)')
        out = self.to_out(out)
        return self.dropout(out)
| 9,531 | 35.945736 | 136 | py |
NMTGMinor | NMTGMinor-master/onmt/models/transformer_layers.py | import math
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.utils.weight_norm as WeightNorm
import onmt
import torch.nn.functional as F
from onmt.modules.bottle import Bottle
from onmt.modules.static_dropout import StaticDropout
from onmt.modules.linear import XavierLinear as Linear
from onmt.modules.linear import XavierLinear
from onmt.modules.linear import group_linear, FeedForwardSwish
from onmt.modules.linear import FeedForward
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.optimized.encdec_attention import EncdecMultiheadAttn
from onmt.modules.optimized.self_attention import SelfMultiheadAttn
from onmt.modules.optimized.feed_forward import PositionWiseFeedForward
from collections import defaultdict
from onmt.modules.pre_post_processing import PrePostProcessing
# class PrePostProcessing(nn.Module):
# """Applies processing to tensors
# Args:
# d_model: dimension of model
# p: dropout probabolity
# sequence of processing steps:
# n = normalization
# d = dropout
# a = adding previous input to output (residual)
# """
#
# def __init__(self, d_model, dropout_p, sequence='nda', variational=False, elementwise_affine=True):
# super(PrePostProcessing, self).__init__()
# self.d_model = d_model
# self.dropout_p = dropout_p
#
# self.steps = list(sequence)
#
# if onmt.constants.residual_type == 'gated':
# # gated residual
# # initialize k with one
# self.k = nn.Parameter(torch.ones(1))
#
# if 'n' in self.steps:
# ln = nn.LayerNorm((self.d_model,), elementwise_affine=elementwise_affine)
# self.layer_norm = Bottle(ln)
# if 'd' in self.steps:
# if variational:
# self.dropout = VariationalDropout(self.dropout_p, batch_first=False)
# else:
# self.dropout = nn.Dropout(self.dropout_p)
# if 'z' in self.steps:
# # Rezero residual method
# self.g = nn.Parameter(torch.tensor(0.0))
#
# def forward(self, tensor, input_tensor=None, mask=None):
#
# output = tensor
# for step in self.steps:
# if step == 'n':
# output = self.layer_norm(output, mask=mask)
# if step == 'd':
# output = self.dropout(output)
# if step == 'a':
# if input_tensor is not None:
# output = output + input_tensor
# if step == 'z': # rezero-residual but scaling the output with initially small g
# output = output * self.g
# if input_tensor is not None:
# output = output + input_tensor
# return output
def preprocessing(rezero, *args, **kwargs):
    """Factory for the pre-processing step: ReZero layers skip it entirely
    (identity), otherwise a normal PrePostProcessing module is built with the
    given arguments."""
    return Identity() if rezero else PrePostProcessing(*args, **kwargs)
class EncoderLayer(nn.Module):
    """Wraps multi-head attentions and position-wise feed forward into one encoder layer
    Args:
        h: number of heads
        d_model: dimension of model
        p: dropout probabolity
        d_ff: dimension of feed forward
    Params:
        multihead: multi-head attentions layer
        feedforward: feed forward layer
    Input Shapes:
        query: batch_size x len_query x d_model
        key: batch_size x len_key x d_model
        value: batch_size x len_key x d_model
        mask: batch_size x len_query x len_key or broadcastable
    Output Shapes:
        out: batch_size x len_query x d_model
    """

    # def __init__(self, h, d_model, p, d_ff, attn_p=0.1, variational=False, death_rate=0.0, **kwargs):
    def __init__(self, opt, death_rate=0.0, **kwargs):
        super(EncoderLayer, self).__init__()
        self.variational = opt.variational_dropout
        # probability of skipping this layer entirely (stochastic depth)
        self.death_rate = death_rate
        self.fast_self_attention = opt.fast_self_attention
        self.macaron = opt.macaron
        # macaron-style layers halve each FFN contribution (two FFNs per layer)
        self.ffn_scale = 0.5 if self.macaron else 1
        if self.macaron:
            self.preprocess_mcr_ffn = preprocessing(opt.rezero, opt.model_size, opt.dropout, sequence='n')
            self.postprocess_mcr_ffn = PrePostProcessing(opt.model_size, opt.dropout,
                                                         sequence='da', variational=self.variational)
            self.mcr_feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
                                                           variational=self.variational,
                                                           activation=opt.ffn_activation, glu=opt.ffn_glu)
        self.preprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
        self.postprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
                                                  variational=self.variational)
        self.preprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
        self.postprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
                                                 variational=self.variational)
        if opt.fast_self_attention:
            self.multihead = SelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout)
        else:
            self.multihead = MultiHeadAttention(opt.n_heads, opt.model_size, attn_p=opt.attn_dropout, share=1)
        if not opt.fast_feed_forward:
            feedforward = FeedForward(opt.model_size, opt.inner_size, opt.dropout,
                                      variational=self.variational)
            self.feedforward = Bottle(feedforward)
        else:
            self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
                                                       variational=self.variational,
                                                       activation=opt.ffn_activation, glu=opt.ffn_glu)

    def forward(self, input, attn_mask):
        # stochastic depth: during training skip the whole layer with prob death_rate
        coin = True
        if self.training:
            coin = (torch.rand(1)[0].item() >= self.death_rate)
        if coin:
            # MCR feedforward
            if self.macaron:
                out = self.mcr_feedforward(self.preprocess_mcr_ffn(input))
                if self.training and self.death_rate > 0:
                    # rescale so the expected activation matches inference
                    ffn_scale = self.ffn_scale / (1 - self.death_rate)
                else:
                    ffn_scale = self.ffn_scale
                input = self.postprocess_mcr_ffn(out * ffn_scale, input)
            query = self.preprocess_attn(input)
            if self.fast_self_attention:
                out, _ = self.multihead(query, None, attn_mask, None)
            else:
                out, _ = self.multihead(query, query, query, attn_mask)
            if self.training and self.death_rate > 0:
                out = out / (1 - self.death_rate)
            input = self.postprocess_attn(out, input)
            """ Feed forward layer
                layernorm > ffn > dropout > residual
            """
            out = self.feedforward(self.preprocess_ffn(input))
            if self.training and self.death_rate > 0:
                ffn_scale = self.ffn_scale / (1 - self.death_rate)
            else:
                ffn_scale = self.ffn_scale
            input = self.postprocess_ffn(out * ffn_scale, input)
        # checking for inf/nan which can happen randomly in fp16 ...
        if torch.isinf(input).any() or torch.isnan(input).any():
            clamp_value = torch.finfo(input.dtype).max - 1000
            input.clamp_(min=-clamp_value, max=clamp_value)
        return input
class DecoderLayer(nn.Module):
    """Wraps multi-head attentions and position-wise feed forward into one layer of decoder
    Args:
        h: number of heads
        d_model: dimension of model
        p: dropout probabolity
        d_ff: dimension of feed forward
    Params:
        multihead_tgt: multi-head self attentions layer
        multihead_src: multi-head encoder-decoder attentions layer
        feedforward: feed forward layer
    Input Shapes:
        query: batch_size x len_query x d_model
        key: batch_size x len_key x d_model
        value: batch_size x len_key x d_model
        context: batch_size x len_src x d_model
        mask_tgt: batch_size x len_query x len_key or broadcastable
        mask_src: batch_size x len_query x len_src or broadcastable
    Output Shapes:
        out: batch_size x len_query x d_model
        coverage: batch_size x len_query x len_key
    """

    def __init__(self, opt, death_rate=0.0):
        super(DecoderLayer, self).__init__()
        self.ignore_source = opt.ignore_source
        self.variational = opt.variational_dropout
        # probability of skipping this layer entirely (stochastic depth)
        self.death_rate = death_rate
        self.fast_self_attention = opt.fast_self_attention
        self.macaron = opt.macaron
        # macaron-style layers halve each FFN contribution (two FFNs per layer)
        self.ffn_scale = 0.5 if self.macaron else 1
        if self.macaron:
            self.preprocess_mcr_ffn = preprocessing(opt.rezero, opt.model_size, opt.dropout, sequence='n')
            self.postprocess_mcr_ffn = PrePostProcessing(opt.model_size, opt.dropout,
                                                         sequence='da', variational=self.variational)
            self.mcr_feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
                                                           variational=self.variational,
                                                           activation=opt.ffn_activation, glu=opt.ffn_glu)
        self.preprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
        self.postprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
                                                  variational=self.variational)
        if opt.fast_self_attention:
            self.multihead_tgt = SelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout)
        else:
            self.multihead_tgt = MultiHeadAttention(opt.n_heads, opt.model_size, attn_p=opt.attn_dropout, share=1)
        if not self.ignore_source:
            # encoder-decoder (cross) attention sub-layer
            self.preprocess_src_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
            self.postprocess_src_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
                                                          variational=self.variational)
            if not opt.fast_xattention:
                self.multihead_src = MultiHeadAttention(opt.n_heads, opt.model_size, attn_p=opt.attn_dropout, share=2)
            else:
                self.multihead_src = EncdecMultiheadAttn(opt.n_heads, opt.model_size, opt.attn_dropout)
        self.preprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
        self.postprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
                                                 variational=self.variational)
        if not opt.fast_feed_forward:
            feedforward = FeedForward(opt.model_size, opt.inner_size, opt.dropout,
                                      variational=self.variational)
            self.feedforward = Bottle(feedforward)
        else:
            self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
                                                       variational=self.variational,
                                                       activation=opt.ffn_activation, glu=opt.ffn_glu)

    def forward(self, input, context, mask_tgt, mask_src,
                incremental=False, incremental_cache=None, reuse_source=True):
        """ Self attention layer
            layernorm > attn > dropout > residual
        """
        if incremental:
            # lazily create the per-step key/value cache for incremental decoding
            if incremental_cache is None:
                incremental_cache = dict()
        coverage = None
        # stochastic depth: during training skip the whole layer with prob death_rate
        coin = True
        if self.training:
            coin = (torch.rand(1)[0].item() >= self.death_rate)
        if coin:
            # MCR feedforward
            if self.macaron:
                out = self.mcr_feedforward(self.preprocess_mcr_ffn(input))
                if self.training and self.death_rate > 0:
                    # rescale so the expected activation matches inference
                    ffn_scale = self.ffn_scale / (1 - self.death_rate)
                else:
                    ffn_scale = self.ffn_scale
                input = self.postprocess_mcr_ffn(out * ffn_scale, input)
            query = self.preprocess_attn(input)
            if self.fast_self_attention:
                out, _, = self.multihead_tgt(query, None, None, mask_tgt,
                                             incremental=incremental,
                                             incremental_cache=incremental_cache)
            else:
                out, _, = self.multihead_tgt(query, query, query, mask_tgt,
                                             incremental=incremental,
                                             incremental_cache=incremental_cache)
            if self.training and self.death_rate > 0:
                out = out / (1 - self.death_rate)
            input = self.postprocess_attn(out, input)
            """ Context Attention layer
                layernorm > attn > dropout > residual
            """
            if not self.ignore_source:
                query = self.preprocess_src_attn(input)
                out, coverage = self.multihead_src(query, context, context, mask_src,
                                                   incremental=incremental,
                                                   incremental_cache=incremental_cache)
                if self.training and self.death_rate > 0:
                    out = out / (1 - self.death_rate)
                input = self.postprocess_src_attn(out, input)
            else:
                coverage = None
            """ Feed forward layer
                layernorm > ffn > dropout > residual
            """
            out = self.feedforward(self.preprocess_ffn(input))
            if self.training and self.death_rate > 0:
                ffn_scale = self.ffn_scale / (1 - self.death_rate)
            else:
                ffn_scale = self.ffn_scale
            input = self.postprocess_ffn(out * ffn_scale, input)
        # checking for inf/nan which can happen randomly in fp16 ...
        if torch.isinf(input).any() or torch.isnan(input).any():
            clamp_value = torch.finfo(input.dtype).max - 1000
            input.clamp_(min=-clamp_value, max=clamp_value)
        return input, coverage, incremental_cache
class PositionalEncoding(nn.Module):
    """Adds positional embeddings to standard word embeddings
    This matches the original TensorFlow implementation at
    https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/layers/common_attention.py.
    Args:
        d_model: dimension of model
        p: dropout probability
        len_max: max seq length for pre-calculated positional embeddings
    Inputs Shapes:
        word_emb: batch_size x len_seq x d_model
    Outputs Shapes:
        out: batch_size x len_seq x d_model
    """

    def __init__(self, d_model, p=0, len_max=512):
        # save a fixed positional embedding matrix up to len_max,
        # so that no need to recreate it everytime
        super(PositionalEncoding, self).__init__()
        self.len_max = len_max
        self.d_model = d_model
        self.data_type = None
        self.renew(len_max)
        self.p = p

    def renew(self, new_max_len):
        """Rebuild the sinusoidal table to cover `new_max_len` positions."""
        # delete the old variable to avoid Pytorch's error when registering a new buffer
        cuda = False
        if hasattr(self, 'pos_emb'):
            cuda = self.pos_emb.is_cuda
            # self.data_type = torch.type(self.pos_emb)
            del self.pos_emb
        position = torch.arange(0, new_max_len).float()
        num_timescales = self.d_model // 2
        log_timescale_increment = math.log(10000) / (num_timescales - 1)
        inv_timescales = torch.exp(torch.arange(0, num_timescales).float() * -log_timescale_increment)
        scaled_time = position.unsqueeze(1) * inv_timescales.unsqueeze(0)
        # first half sin, second half cos (tensor2tensor layout)
        pos_emb = torch.cat((torch.sin(scaled_time), torch.cos(scaled_time)), 1)
        if cuda:
            pos_emb = pos_emb.cuda()
        if self.data_type is not None:
            # bugfix: Tensor.type() is out-of-place; previously the converted
            # tensor was discarded, leaving the buffer in the default dtype.
            pos_emb = pos_emb.type(self.data_type)
        # wrap in a buffer so that model can be moved to GPU
        self.register_buffer('pos_emb', pos_emb)
        self.len_max = new_max_len

    def forward(self, word_emb, t=None):
        """Add positional embeddings to `word_emb`; `t` selects a single time step."""
        len_seq = t if t else word_emb.size(1)
        self.data_type = word_emb.type()
        if len_seq > self.len_max:
            # grow the table on demand for longer sequences
            self.renew(len_seq)
        if word_emb.size(1) == len_seq:
            time_ = self.pos_emb[:len_seq, :].type_as(word_emb)
            out = word_emb + time_
        else:
            # single-step decoding: add only the embedding of position len_seq-1
            time_emb = self.pos_emb[len_seq - 1, :]  # 1 x dim
            # out should have size bs x 1 x dim
            out = word_emb + time_emb.unsqueeze(0).repeat(word_emb.size(0), 1, 1).type_as(word_emb)
        return out

    def get_positional_embeddings(self, word_emb, t=None):
        """Return the positional embeddings alone (no word embedding added)."""
        len_seq = t if t else word_emb.size(1)
        self.data_type = word_emb.type()
        if len_seq > self.len_max:
            self.renew(len_seq)
        if word_emb.size(1) == len_seq:
            time_emb = self.pos_emb[:len_seq, :].type_as(word_emb)
        else:
            time_emb = self.pos_emb[len_seq - 1, :].unsqueeze(0).type_as(word_emb)
        return time_emb
| 17,782 | 39.142212 | 118 | py |
NMTGMinor | NMTGMinor-master/onmt/models/pretrain_transformer.py | import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import defaultdict
from torch.utils.checkpoint import checkpoint
import onmt
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from onmt.models.transformers import TransformerDecodingState
torch_version = float(torch.__version__[:3])
class PretrainTransformer(NMTModel):
"""Main model in 'Attention is all you need' """
def __init__(self, encoder, decoder, generator=None, rec_decoder=None, rec_generator=None,
             mirror=False, ctc=False):
    """
    :param encoder: pretrained or standard encoder module
    :param decoder: decoder module
    :param generator: output projection(s); sizes below are read from generator[0]
    :param rec_decoder: optional reconstruction decoder
    :param rec_generator: optional reconstruction generator
    :param mirror: if True, create a mirror decoder/generator for future anticipation
    :param ctc: if True, add a CTC output head on top of the encoder
    """
    super().__init__(encoder, decoder, generator, rec_decoder, rec_generator, ctc=ctc)
    # if hasattr(decoder, 'dec_pretrained_model') and decoder.dec_pretrained_model:
    #     self.model_size = self.decoder.config.bert_hidden_size
    #     self.tgt_vocab_size = self.decoder.config.vocab_size
    #     self.switchout = 0
    # else:
    #     self.model_size = self.decoder.model_size
    #     self.tgt_vocab_size = self.decoder.word_lut.weight.size(0)
    #     self.switchout = self.decoder.switchout
    # sizes are derived from the first generator's linear projection (out x in)
    self.model_size = self.generator[0].linear.weight.size(1)
    self.tgt_vocab_size = self.generator[0].linear.weight.size(0)
    # if self.encoder.input_type == 'text':
    #     if hasattr(encoder, 'enc_pretrained_model') and encoder.enc_pretrained_model:
    #         self.src_vocab_size = self.encoder.config.vocab_size
    #     else:
    #         self.src_vocab_size = self.encoder.word_lut.weight.size(0)
    # else:
    self.src_vocab_size = self.tgt_vocab_size
    if mirror:
        # mirror network shares the target embedding weights with the decoder
        self.mirror_decoder = copy.deepcopy(self.decoder)
        self.mirror_g = nn.Linear(decoder.model_size, decoder.model_size)
        self.mirror_generator = copy.deepcopy(self.generator)
        self.mirror_generator[0].linear.weight = self.decoder.word_lut.weight
    if self.reconstruct:
        self.rec_linear = nn.Linear(decoder.model_size, decoder.model_size)
    if self.ctc:
        self.ctc_linear = nn.Linear(encoder.model_size, self.tgt_vocab_size)
def reset_states(self):
    """No-op: this model keeps no recurrent/streaming state to clear."""
    return None
def forward(self, batch, target_mask=None, streaming=False, zero_encoder=False,
            mirror=False, streaming_state=None, nce=False, **kwargs):
    """
    Run encoder + decoder + generator over one training batch.

    :param nce: use noise contrastive estimation
    :param streaming_state: recurrent state carried across streaming segments
    :param streaming: whether streaming (segment-by-segment) mode is on
    :param mirror: if using mirror network for future anticipation
    :param batch: data object sent from the dataset
    :param target_mask: mask over target positions (stored in the output dict)
    :param zero_encoder: zero out the encoder output (if necessary)
    :return: defaultdict with 'hidden', 'context', 'logprobs', masks, streaming state, etc.
    """
    src = batch.get('source')
    tgt = batch.get('target_input')
    src_pos = batch.get('source_pos')
    tgt_pos = batch.get('target_pos')
    src_lang = batch.get('source_lang')
    tgt_lang = batch.get('target_lang')
    src_lengths = batch.src_lengths
    tgt_lengths = batch.tgt_lengths

    # keep the original (time-first) tensors for the reconstruction branch
    org_src = src
    org_tgt = tgt

    src = src.transpose(0, 1)  # transpose to have batch first
    tgt = tgt.transpose(0, 1)

    src_attention_mask = src.ne(onmt.constants.SRC_PAD).long()  # [b, src_len]

    # NOTE(review): this is a bare `if`, not part of the `if/else` chain below, so
    # when the encoder is bert/roberta the `else` branch afterwards also runs and
    # overwrites encoder_output — confirm whether this branch is still exercised.
    if hasattr(self.encoder, 'enc_pretrained_model') and self.encoder.enc_pretrained_model in ["bert", "roberta"]:
        segments_tensor = src.ne(onmt.constants.SRC_PAD).long()
        enc_outputs = self.encoder(src, src_attention_mask, segments_tensor)  # the encoder is a pretrained model
        context = enc_outputs[0]
        encoder_output = defaultdict(lambda: None)
        encoder_output['context'] = context
        encoder_output['src_attention_mask'] = src_attention_mask
        encoder_output['streaming_state'] = None

    if hasattr(self.encoder, 'enc_pretrained_model') and \
            self.encoder.enc_pretrained_model in ["mbart", "mbart50", "m2m", "m2m100", "deltalm"]:
        # pretrained seq2seq encoder: the self-attention mask comes precomputed from the batch
        src_attention_mask = batch.get("src_selfattn_mask")
        enc_outputs = self.encoder(src, src_attention_mask)  # the encoder is a pretrained model
        context = enc_outputs[0]
        context = context  # .transpose(0, 1).contiguous()
        encoder_output = defaultdict(lambda: None)
        encoder_output['context'] = context
        encoder_output['src_attention_mask'] = src_attention_mask
        encoder_output['streaming_state'] = None
    else:
        # plain (non-pretrained) encoder
        encoder_output = self.encoder(src, input_pos=src_pos, input_lang=src_lang, streaming=streaming,
                                      src_lengths=src_lengths, streaming_state=streaming_state)
        encoder_output = defaultdict(lambda: None, encoder_output)
        context = encoder_output['context']
        context = context.transpose(0, 1)  # to make it consistent with bert batch first
        # the state is changed
        streaming_state = encoder_output['streaming_state']

    # DECODER PART
    if hasattr(self.decoder, 'dec_pretrained_model') and self.decoder.dec_pretrained_model in ["bert", "roberta"]:
        # src: [b, src_l]  context: [b, src_l, de_model]
        tgt_token_type = tgt.ne(onmt.constants.TGT_PAD).long()  # [bsz, len]
        tgt_attention_mask = tgt.ne(onmt.constants.TGT_PAD).long()  # [bsz, len]
        decoder_output = self.decoder(input_ids=tgt,
                                      attention_mask=tgt_attention_mask,
                                      token_type_ids=tgt_token_type,
                                      encoder_hidden_states=context,
                                      encoder_attention_mask=src_attention_mask,
                                      )
        decoder_output = decoder_output[0]
        output = decoder_output.transpose(0, 1)  # [bsz, tgt_len, d] => [tgt_len, bsz, d]
        output_dict = defaultdict(lambda: None)
        context = context.transpose(0, 1)  # to [src_l, b, de_model]
    elif hasattr(self.decoder, 'dec_pretrained_model') and self.decoder.dec_pretrained_model in \
            ["mbart", "mbart50", "m2m", "m2m100", "deltalm"]:
        # note: .eq() here — the pad mask convention is inverted vs the bert branch
        tgt_attention_mask = tgt.eq(onmt.constants.TGT_PAD).long()  # [bsz, len]
        # This mask is often ignored because a simple time-mask also covers the pad-mask;
        # however it should be carefully handled in the case of using flash-attn.
        decoder_output = self.decoder(input_ids=tgt,
                                      attention_mask=tgt_attention_mask,
                                      encoder_hidden_states=context,
                                      encoder_attention_mask=src_attention_mask,
                                      lang=tgt_lang,
                                      )
        decoder_output = decoder_output[0]
        output = decoder_output  # .transpose(0, 1)  already [tgt_len, bsz, d] here
        output_dict = defaultdict(lambda: None)
    else:
        context = context.transpose(0, 1)  # to [src_l, b, de_model]; src: [b, l]
        decoder_output = self.decoder(tgt, context, src,
                                      src_lang=src_lang, tgt_lang=tgt_lang,
                                      input_pos=tgt_pos, streaming=streaming,
                                      src_lengths=src_lengths, tgt_lengths=tgt_lengths,
                                      streaming_state=streaming_state)
        # update the streaming state again
        decoder_output = defaultdict(lambda: None, decoder_output)
        streaming_state = decoder_output['streaming_state']
        output = decoder_output['hidden']  # [tgt_len, bsz, d]
        # build the output dict based on decoder output
        output_dict = defaultdict(lambda: None, decoder_output)

    output_dict['hidden'] = output  # [tgt_len, bsz, d]
    output_dict['context'] = context
    output_dict['src_mask'] = encoder_output['src_attention_mask']
    output_dict['src'] = src
    output_dict['target_mask'] = target_mask
    output_dict['streaming_state'] = streaming_state
    output_dict['target'] = batch.get('target_output')

    # final layer: computing softmax (or NCE inputs during training)
    if self.training and nce:
        output_dict = self.generator[0](output_dict)
    else:
        logprobs = self.generator[0](output_dict)['logits']
        output_dict['logprobs'] = logprobs

    # Mirror network: reverse the target sequence and perform backward language model
    if mirror:
        tgt_pos = torch.flip(batch.get('target_pos'), (0,))
        tgt_reverse = torch.flip(batch.get('target'), (0,))
        tgt_reverse_input = tgt_reverse[:-1]
        tgt_reverse_output = tgt_reverse[1:]
        tgt_reverse_input = tgt_reverse_input.transpose(0, 1)
        # perform an additional backward pass
        reverse_decoder_output = self.mirror_decoder(tgt_reverse_input, context, src, src_lang=src_lang,
                                                     tgt_lang=tgt_lang, input_pos=tgt_pos)
        reverse_decoder_output['src'] = src
        reverse_decoder_output['context'] = context
        reverse_decoder_output['target_mask'] = target_mask
        reverse_logprobs = self.mirror_generator[0](reverse_decoder_output)['logits']
        output_dict['reverse_target'] = tgt_reverse_output
        output_dict['reverse_hidden'] = reverse_decoder_output['hidden']
        output_dict['reverse_logprobs'] = reverse_logprobs
        output_dict['target_input'] = batch.get('target_input')
        output_dict['target_lengths'] = batch.tgt_lengths
        # learn weights for mapping (g in the paper)
        output_dict['hidden'] = self.mirror_g(output_dict['hidden'])

    # Reconstruction network: decode the source back from the decoder states
    if self.reconstruct:
        bos = org_tgt[0].unsqueeze(0)  # 1 x B
        src_input = torch.cat([bos, org_src[:-1]], dim=0)  # T x B
        src_output = org_src
        src_input = src_input.transpose(0, 1)
        rec_context = self.rec_linear(output_dict['hidden'])  # T x B x H
        rec_decoder_output = self.rec_decoder(src_input, rec_context, tgt, tgt_lang=src_lang, input_pos=src_pos)
        rec_output = rec_decoder_output['hidden']
        rec_logprobs = self.rec_generator[0](rec_decoder_output)['logits']
        output_dict['rec_logprobs'] = rec_logprobs
        output_dict['rec_hidden'] = rec_output
        output_dict['reconstruct'] = True
        output_dict['rec_target'] = src_output
    else:
        output_dict['reconstruct'] = False

    # compute the logits for each encoder step (CTC auxiliary loss)
    if self.ctc:
        output_dict['encoder_logits'] = self.ctc_linear(output_dict['context'])

    return output_dict
def decode(self, batch):
    """
    Force-decode the gold target and score it word by word.

    :param batch: (onmt.Dataset.Batch) an object containing tensors needed for training
    :return: gold_words (int) the total number of non-padded tokens
             gold_scores (torch.Tensor) summed log probs for each sentence
             allgold_scores (list of Tensors) log probs for each word in the sentence
    """
    src = batch.get('source')
    src_pos = batch.get('source_pos')
    tgt_input = batch.get('target_input')
    tgt_output = batch.get('target_output')
    tgt_pos = batch.get('target_pos')
    src_lang = batch.get('source_lang')
    tgt_lang = batch.get('target_lang')

    # transpose to have batch first
    src = src.transpose(0, 1)
    tgt_input = tgt_input.transpose(0, 1)
    batch_size = tgt_input.size(0)

    context = self.encoder(src, input_pos=src_pos, input_lang=src_lang)['context']

    # optional autoencoder bottleneck on encoder states
    if hasattr(self, 'autoencoder') and self.autoencoder \
            and self.autoencoder.representation == "EncoderHiddenState":
        context = self.autoencoder.autocode(context)

    gold_scores = context.new(batch_size).zero_()
    gold_words = 0
    allgold_scores = list()

    decoder_output = self.decoder(tgt_input, context, src, tgt_lang=tgt_lang, src_lang=src_lang,
                                  input_pos=tgt_pos)['hidden']

    output = decoder_output

    # optional autoencoder bottleneck on decoder states
    if hasattr(self, 'autoencoder') and self.autoencoder and \
            self.autoencoder.representation == "DecoderHiddenState":
        output = self.autoencoder.autocode(output)

    # score one timestep at a time; padding positions contribute zero
    for dec_t, tgt_t in zip(output, tgt_output):
        dec_out = defaultdict(lambda: None)
        dec_out['hidden'] = dec_t.unsqueeze(0)
        dec_out['src'] = src
        dec_out['context'] = context

        if isinstance(self.generator, nn.ModuleList):
            gen_t = self.generator[0](dec_out)['logits']
        else:
            gen_t = self.generator(dec_out)['logits']
        gen_t = F.log_softmax(gen_t, dim=-1, dtype=torch.float32)
        gen_t = gen_t.squeeze(0)
        tgt_t = tgt_t.unsqueeze(1)
        scores = gen_t.gather(1, tgt_t)
        scores.masked_fill_(tgt_t.eq(onmt.constants.TGT_PAD), 0)
        gold_scores += scores.squeeze(1).type_as(gold_scores)
        gold_words += tgt_t.ne(onmt.constants.TGT_PAD).sum().item()
        allgold_scores.append(scores.squeeze(1).type_as(gold_scores))

    return gold_words, gold_scores, allgold_scores
def renew_buffer(self, new_len):
    """Delegate buffer re-allocation for the new maximum length to the decoder."""
    decoder = self.decoder
    decoder.renew_buffer(new_len)
def step(self, input_t, decoder_state, streaming=False):
    """
    Decoding function:
    generate new decoder output based on the current input and current decoder state;
    the decoder state is updated in the process.
    :param streaming: whether streaming mode is on
    :param input_t: the input word index from time 0 to time t
    :param decoder_state: object DecoderState containing the buffers required for decoding
    :return: a dictionary containing: log-prob output and the attention coverage
    """
    output_dict = self.decoder.step(input_t, decoder_state, streaming=streaming)
    output_dict['src'] = decoder_state.src.transpose(0, 1)

    # squeeze to remove the time step dimension
    log_prob = self.generator[0](output_dict)['logits'].squeeze(0)
    log_prob = F.log_softmax(log_prob, dim=-1, dtype=torch.float32)  # [beam*b, 1, vocab_size]

    output_dict['log_prob'] = log_prob.squeeze(1)

    # Currently attention score is not returned
    # coverage = output_dict['coverage']
    # last_coverage = coverage[:, -1, :].squeeze(1)
    # output_dict['coverage'] = last_coverage
    return output_dict
def create_decoder_state(self, batch, beam_size=1, type=1, buffering=True, **kwargs):
    """
    Generate a new decoder state based on the batch input.
    :param buffering: whether to cache attention buffers during decoding
    :param type: layout type passed through to TransformerDecodingState
    :param batch: Batch object (may not contain target during decoding)
    :param beam_size: Size of beam used in beam search
    :return: TransformerDecodingState
    """
    src = batch.get('source')
    src_pos = batch.get('source_pos')
    tgt_atb = batch.get('target_atb')
    src_lang = batch.get('source_lang')
    tgt_lang = batch.get('target_lang')

    src_transposed = src.transpose(0, 1)  # [batch_size, src_len]

    if not self.encoder.enc_pretrained_model:
        encoder_output = self.encoder(src_transposed, input_pos=src_pos, input_lang=src_lang)
    elif self.encoder.enc_pretrained_model in ['mbart', 'mbart50', 'm2m', 'm2m100', "deltalm"]:
        src_attention_mask = batch.get("src_selfattn_mask")
        enc_outputs = self.encoder(src_transposed, src_attention_mask)
        context = enc_outputs[0]
        encoder_output = defaultdict(lambda: None)
        encoder_output["context"] = context
    else:
        print("Warning: unknown enc_pretrained_model")
        raise NotImplementedError

    dec_pretrained_model = self.decoder.dec_pretrained_model
    if not dec_pretrained_model:
        mask_src = None
    elif dec_pretrained_model in ["mbart", "mbart50", "m2m", "m2m100", "deltalm"]:
        # NOTE(review): src_attention_mask is only bound in the pretrained-encoder
        # branch above; a non-pretrained encoder combined with an mbart-family
        # decoder would raise NameError here — confirm that combination never occurs.
        mask_src = src_attention_mask  # batch_size x 1 x len_src for broadcasting
    else:
        print("Warning: unknown dec_pretrained_model")
        raise NotImplementedError

    decoder_state = TransformerDecodingState(src, tgt_lang, encoder_output['context'], src_lang,
                                             beam_size=beam_size, model_size=self.model_size,
                                             type=type, buffering=buffering, src_mask=mask_src,
                                             dec_pretrained_model=self.decoder.dec_pretrained_model)
    return decoder_state
def init_stream(self):
    """Streaming is not supported by this model; nothing to initialize."""
    return
def set_memory_size(self, src_memory_size, tgt_memory_size):
    """Streaming memory sizes are not used by this model; intentionally a no-op."""
    return
def tie_weights(self):
    """Tie the generator's output projection to the decoder's input embedding.

    The embedding lives in a different attribute depending on the decoder type,
    so the branches must be mutually exclusive. The original code used two
    independent ``if`` statements: for a bert/roberta decoder the first branch
    tied the weight and then the ``else`` of the second ``if`` immediately
    overwrote it with ``self.decoder.word_lut`` (which a BERT decoder does not
    have). Fixed by chaining with ``elif``.
    """
    assert self.generator is not None, "The generator needs to be created before sharing weights"
    if hasattr(self.decoder, 'dec_pretrained_model') and self.decoder.dec_pretrained_model in ["bert", "roberta"]:
        # HuggingFace-style BERT/RoBERTa decoder
        self.generator[0].linear.weight = self.decoder.embeddings.word_embeddings.weight
    elif hasattr(self.decoder, 'dec_pretrained_model') and self.decoder.dec_pretrained_model \
            in ["mbart", "mbart50", "m2m", "m2m100", "deltalm"]:
        # pretrained seq2seq decoders expose the embedding as embed_tokens
        self.generator[0].linear.weight = self.decoder.embed_tokens.weight
    else:
        # from-scratch decoder: embedding is the word lookup table
        self.generator[0].linear.weight = self.decoder.word_lut.weight
| 18,440 | 45.923664 | 118 | py |
NMTGMinor | NMTGMinor-master/onmt/models/relative_transformer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState
import onmt
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from onmt.modules.dropout import embedded_dropout
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
from onmt.models.relative_transformer_layers import RelativeTransformerEncoderLayer, RelativeTransformerDecoderLayer
from onmt.reversible_models.relative_transformers import ReversibleEncoderFunction, ReversibleDecoderFunction, \
ReversibleTransformerDecoderLayer, ReversibleTransformerEncoderLayer
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
import sys
torch.set_printoptions(threshold=500000)
# Sinusoidal embedding for (relative) position indices
class SinusoidalPositionalEmbedding(nn.Module):
    """Map a 1-D tensor of position indices to sinusoidal embeddings.

    Returns a tensor of shape (len, 1, demb), or (len, bsz, demb) when a batch
    size is supplied; each row is the sin/cos of position * inverse frequency.
    """

    def __init__(self, demb):
        super(SinusoidalPositionalEmbedding, self).__init__()
        self.demb = demb
        # inverse frequencies 1 / 10000^(2i/d), registered as a buffer so they
        # follow the module across devices but are not trained
        frequencies = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
        self.register_buffer('inv_freq', frequencies)

    def forward(self, pos_seq, sin_first=True, bsz=None):
        """
        :param bsz: if given, repeat the embedding along the batch dimension
        :param pos_seq: sequences of RELATIVE position indices (can be negative for future)
        :param sin_first: in 'Attention is all you need', sin comes before cos
        """
        angles = torch.ger(pos_seq, self.inv_freq)
        halves = (angles.sin(), angles.cos()) if sin_first else (angles.cos(), angles.sin())
        emb = torch.cat(halves, dim=-1)[:, None, :]
        return emb if bsz is None else emb.repeat(1, bsz, 1)
class RelativeTransformerEncoder(TransformerEncoder):
    """Transformer encoder using relative position attention (Transformer-XL style),
    with optional reversible layers, streaming memory and unidirectional masking."""

    def __init__(self, opt, dicts, positional_encoder, encoder_type='text', language_embeddings=None):
        """
        :param opt: option namespace (model_size, n_heads, death_rate, ...)
        :param dicts: vocabulary dictionaries
        :param positional_encoder: positional encoder passed to the base class
        :param encoder_type: 'text' or 'audio'
        :param language_embeddings: optional shared language-embedding table
        """
        self.death_rate = opt.death_rate
        self.learnable_position_encoding = opt.learnable_position_encoding
        self.layer_modules = list()
        self.asynchronous = opt.asynchronous
        self.max_memory_size = opt.max_memory_size
        self.extra_context_size = opt.extra_context_size
        self.experimental = opt.experimental
        self.unidirectional = opt.unidirectional
        self.reversible = opt.src_reversible
        self.n_heads = opt.n_heads
        self.fast_self_attn = opt.fast_self_attention
        self.add_position_encoding = opt.add_position_encoding

        # build_modules will be called from the inherited constructor
        super(RelativeTransformerEncoder, self).__init__(opt, dicts, positional_encoder, encoder_type,
                                                         language_embeddings)

        # learnable position encoding
        if self.learnable_position_encoding:
            raise NotImplementedError
        else:
            # or using pre-set sinusoidal
            self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)

        self.d_head = self.model_size // self.n_heads
def build_modules(self):
    """Instantiate the encoder layer stack (plain or reversible), with a
    linearly increasing stochastic-depth death rate per layer."""
    e_length = expected_length(self.layers, self.death_rate)
    if self.reversible:
        print("* Reversible Encoder with Relative Attention with %.2f expected layers" % e_length)
    else:
        print("* Transformer Encoder with Relative Attention with %.2f expected layers" % e_length)
    if self.unidirectional:
        print("* Running a unidirectional Encoder.")

    self.layer_modules = nn.ModuleList()

    for _l in range(self.layers):
        # linearly decay the death rate
        death_r = (_l + 1.0) / self.layers * self.death_rate

        if not self.reversible:
            block = RelativeTransformerEncoderLayer(self.opt, death_rate=death_r)
        else:
            block = ReversibleTransformerEncoderLayer(self.opt, death_rate=death_r)

        self.layer_modules.append(block)
def create_stream_mask(self, input, input_length, prev_mem_size):
    """Build the block-diagonal-with-history attention mask for streaming.

    Each segment's queries may attend to their own segment and all previous
    segments (and the whole memory buffer); 1/True marks a BLOCKED position.

    :param input: tensor used only as a factory for new mask tensors
    :param input_length: 1-D tensor of per-segment lengths
    :param prev_mem_size: number of cached memory positions to prepend
    :return: bool mask of shape (total_len, prev_mem_size + total_len)
    """
    lengths = input_length.tolist()
    mask = None

    for length in lengths:
        # the current mask should be either None or the mask built so far
        if mask is None:
            prev_length = 0
        else:
            prev_length = mask.size(1)

        # n current queries attend to n + p keys (zeros = allowed)
        current_mask = input.new_zeros(length, length + prev_length)

        if prev_length > 0:
            # previous queries must NOT see the current segment's keys
            prev_mask = input.new_ones(prev_length, length)
            prev_mask = torch.cat([mask, prev_mask], dim=-1)
        else:
            prev_mask = None

        if prev_mask is not None:
            mask = torch.cat([prev_mask, current_mask], dim=0)
        else:
            mask = current_mask

    if prev_mem_size > 0:
        # all current elements attend to all buffer elements
        buffer_mask = mask.new_zeros(mask.size(0), prev_mem_size)
        mask = torch.cat([buffer_mask, mask], dim=-1)

    mask = mask.bool()
    return mask
def forward(self, input, input_pos=None, input_lang=None, streaming=False, **kwargs):
    """
    Inputs Shapes:
        input: batch_size x src_len (text) or batch_size x src_len x feat (audio)
    Outputs Shapes:
        out: defaultdict with 'context' (src_len x batch_size x d_model),
             'src_mask', 'src' and (when streaming) 'streaming_state'
    """
    """ Embedding: batch_size x src_len x d_model """
    if self.input_type == "text":
        bsz_first_input = input
        input = input.transpose(0, 1)

        # pad mask used later by the decoder's encoder-attention
        dec_attn_mask = bsz_first_input.eq(onmt.constants.PAD).unsqueeze(1)

        if streaming:
            streaming_state = kwargs.get('streaming_state', None)
            mems = streaming_state.src_mems
            # length of the cached memory (0 on the first segment)
            mem_len = mems[0].size(0) if mems is not None else 0
            input_length = kwargs.get('src_lengths', None)
            streaming_state = kwargs.get('streaming_state', None)
            mask_src = self.create_stream_mask(input, input_length, mem_len)
            mask_src = mask_src.unsqueeze(2)
        else:
            mem_len = 0
            mask_src = input.eq(onmt.constants.PAD).unsqueeze(0)  # batch_size x src_len x 1 for broadcasting
            mems = None

        emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)

        """ Adding language embeddings """
        if self.use_language_embedding:
            assert self.language_embedding is not None
            # There is no "unsqueeze" here because the input is T x B x H and lang_emb is B x H
            if self.language_embedding_type in ['sum', 'all_sum']:
                lang_emb = self.language_embedding(input_lang)
                emb = emb + lang_emb.unsqueeze(0)
    else:
        # audio input path
        if streaming:
            raise NotImplementedError

        if not self.cnn_downsampling:
            mask_src = input.narrow(2, 0, 1).squeeze(2).transpose(0, 1).eq(onmt.constants.PAD).unsqueeze(0)
            dec_attn_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
            input = input.narrow(2, 1, input.size(2) - 1)
            emb = self.audio_trans(input.contiguous().view(-1, input.size(2))).view(input.size(0),
                                                                                    input.size(1), -1)
            emb = emb.type_as(input)
        else:
            long_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
            input = input.narrow(2, 1, input.size(2) - 1)

            # first resizing to fit the CNN format
            input = input.view(input.size(0), input.size(1), -1, self.channels)
            input = input.permute(0, 3, 1, 2)
            input = self.audio_trans(input)
            input = input.permute(0, 2, 1, 3).contiguous()
            input = input.view(input.size(0), input.size(1), -1)
            input = self.linear_trans(input)

            # CNN downsamples time by 4, so subsample the pad mask accordingly
            mask_src = long_mask[:, 0:input.size(1) * 4:4].transpose(0, 1).unsqueeze(0)
            dec_attn_mask = long_mask[:, 0:input.size(1) * 4:4].unsqueeze(1)
            # the size seems to be B x T ?
            emb = input

        emb = emb.transpose(0, 1)
        input = input.transpose(0, 1)
        abs_pos = None
        mem_len = 0
        mems = None

    if self.unidirectional:
        # add a causal (upper-triangular) component to the pad mask
        qlen = input.size(0)
        klen = qlen + mem_len
        attn_mask_src = torch.triu(
            emb.new_ones(qlen, klen), diagonal=1 + mem_len).byte()[:, :, None]

        pad_mask = mask_src

        mask_src = pad_mask + attn_mask_src
        # dec_attn_mask = dec_attn_mask + pad_mask.unsqueeze(0)
        mask_src = mask_src.gt(0)

    if onmt.constants.torch_version >= 1.2:
        mask_src = mask_src.bool()

    """ Scale the emb by sqrt(d_model) """
    emb = emb * math.sqrt(self.model_size)

    """ Adding positional encoding """
    qlen = input.size(0)
    klen = qlen + mem_len

    # Asynchronous positions: 2K+1 positions instead of K+1
    if self.unidirectional:
        pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
    else:
        pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)

    # pos_emb has size 2T+1 x 1 x H
    pos_emb = self.positional_encoder(pos, bsz=input.size(1) if self.fast_self_attn else None)

    if self.learnable_position_encoding:
        raise NotImplementedError

    # B x T x H -> T x B x H
    context = emb

    if streaming:
        hids = [context]

    # Apply dropout to both context and pos_emb
    context = self.preprocess_layer(context)

    pos_emb = self.preprocess_layer(pos_emb)

    if self.reversible:
        # reversible layers operate on a doubled hidden dimension
        context = torch.cat([context, context], dim=-1)

        assert streaming is not True, "Streaming and Reversible is not usable yet."
        context = ReversibleEncoderFunction.apply(context, pos_emb, self.layer_modules, mask_src)
    else:
        for i, layer in enumerate(self.layer_modules):
            # src_len x batch_size x d_model
            mems_i = mems[i] if mems is not None and streaming and self.max_memory_size > 0 else None
            context = layer(context, pos_emb, mask_src, mems=mems_i)

            if streaming:
                hids.append(context)

    # final layer norm
    context = self.postprocess_layer(context)

    output_dict = defaultdict(lambda: None, {'context': context, 'src_mask': dec_attn_mask, 'src': input})

    if streaming:
        streaming_state.update_src_mems(hids, qlen)
        output_dict['streaming_state'] = streaming_state

    return output_dict
class RelativeTransformerDecoder(TransformerDecoder):
    """Transformer decoder using relative position attention, with streaming
    memory support and optional reversible layers."""

    def __init__(self, opt, dicts, positional_encoder, language_embeddings=None, ignore_source=False):
        """
        :param opt: option namespace (model_size, n_heads, death_rate, ...)
        :param dicts: vocabulary dictionaries
        :param positional_encoder: positional encoder passed to the base class
        :param language_embeddings: optional shared language-embedding table
        :param ignore_source: if True, build a decoder-only (LM) stack
        """
        self.death_rate = opt.death_rate
        self.max_memory_size = opt.max_memory_size
        self.stream_context = opt.stream_context
        self.extra_context_size = opt.extra_context_size
        self.n_heads = opt.n_heads
        self.fast_self_attn = opt.fast_self_attention

        # build_modules will be called from the inherited constructor
        super(RelativeTransformerDecoder, self).__init__(opt, dicts,
                                                         positional_encoder,
                                                         language_embeddings,
                                                         ignore_source,
                                                         allocate_positions=False)

        self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
        self.d_head = self.model_size // self.n_heads
        # Parameters for the position biases - deprecated. kept for backward compatibility
        self.r_w_bias = nn.Parameter(torch.Tensor(self.n_heads, self.d_head))
        self.r_r_bias = nn.Parameter(torch.Tensor(self.n_heads, self.d_head))
def renew_buffer(self, new_len):
    """Relative attention needs no pre-allocated position buffer; no-op."""
    return None
def build_modules(self):
    """Instantiate the decoder layer stack (plain or reversible), with a
    linearly increasing stochastic-depth death rate per layer."""
    e_length = expected_length(self.layers, self.death_rate)
    self.opt.ignore_source = self.ignore_source
    if self.reversible:
        print("* Transformer Reversible Decoder with Relative Attention with %.2f expected layers" % e_length)
    else:
        print("* Transformer Decoder with Relative Attention with %.2f expected layers" % e_length)

    self.layer_modules = nn.ModuleList()

    for l in range(self.layers):
        # linearly decay the death rate
        death_r = (l + 1.0) / self.layers * self.death_rate

        if not self.reversible:
            block = RelativeTransformerDecoderLayer(self.opt, death_rate=death_r)
        else:
            block = ReversibleTransformerDecoderLayer(self.opt, death_rate=death_r)

        self.layer_modules.append(block)
def process_embedding(self, input, input_lang=None):
    """Identity hook: this decoder applies no extra embedding processing."""
    return input
def create_context_mask(self, input, src, src_lengths, tgt_lengths, extra_context_length=0):
    """
    Generate the mask so that part of the target attends to a part of the source.
    1/True marks a BLOCKED position.
    :param extra_context_length: number of carried-over source positions each
        target segment may additionally attend to (local/limited modes)
    :param input: tensor used only as a factory for new mask tensors
    :param src: source tensor (unused here; kept for signature compatibility)
    :param src_lengths: per-segment source lengths
    :param tgt_lengths: per-segment target lengths
    :return: bool mask of shape (total_tgt_len, total_src_len [+ extra])
    """
    mask = None

    if self.stream_context == 'global':
        # Global context: one target attends to everything in the source
        for (src_length, tgt_length) in zip(src_lengths, tgt_lengths):
            if mask is None:
                prev_src_length = 0
                prev_tgt_length = 0
            else:
                prev_src_length, prev_tgt_length = mask.size(1), mask.size(0)

            # current sent attend to current src sent and all src in the past
            current_mask = input.new_zeros(tgt_length, src_length + prev_src_length)

            # the previous target cannot attend to the current source
            if prev_tgt_length > 0:
                prev_mask = input.new_ones(prev_tgt_length, src_length)
                prev_mask = torch.cat([mask, prev_mask], dim=-1)
            else:
                prev_mask = None

            # the output mask has two parts: the prev and the current
            if prev_mask is not None:
                mask = torch.cat([prev_mask, current_mask], dim=0)
            else:
                mask = current_mask

    elif self.stream_context in ['local', 'limited']:
        # Local context: only attends to the aligned context (plus extra tail)
        for (src_length, tgt_length) in zip(src_lengths, tgt_lengths):
            if mask is None:
                prev_src_length = 0
                prev_tgt_length = 0
            else:
                prev_src_length, prev_tgt_length = mask.size(1), mask.size(0)

            # current tgt sent attend to only current src sent
            if prev_src_length > 0:
                current_mask = torch.cat([input.new_ones(tgt_length, prev_src_length - extra_context_length),
                                          input.new_zeros(tgt_length, src_length + extra_context_length)], dim=-1)
            else:
                current_mask = input.new_zeros(tgt_length, src_length + extra_context_length)

            # the previous target cannot attend to the current source
            if prev_tgt_length > 0:
                prev_mask = input.new_ones(prev_tgt_length, src_length)
                prev_mask = torch.cat([mask, prev_mask], dim=-1)
            else:
                prev_mask = None

            # the output mask has two parts: the prev and the current
            if prev_mask is not None:
                mask = torch.cat([prev_mask, current_mask], dim=0)
            else:
                mask = current_mask

    mask = mask.bool()

    return mask
def create_self_attn_mask(self, input, tgt_lengths, prev_tgt_mem_size):
    """
    Create a mask for the target words attending to the past.
    1/True marks a BLOCKED position.
    :param input: tensor used only as a factory for new mask tensors
    :param tgt_lengths: per-segment target lengths
    :param prev_tgt_mem_size: number of cached target memory positions
    :return: bool mask of shape (qlen, klen, 1)
    """
    if self.stream_context in ['local', 'global']:
        # standard causal mask over the whole (memory + current) sequence
        qlen = sum(tgt_lengths.tolist())
        mlen = prev_tgt_mem_size
        klen = qlen + mlen
        mask = torch.triu(input.new_ones(qlen, klen), diagonal=1 + mlen).bool()[:, :, None]
    elif self.stream_context in ['limited']:
        # limited means that every sentence only pay attention to the extra memory size
        extra_mem_len = self.max_memory_size
        mask = None
        memory_size = prev_tgt_mem_size

        for length in tgt_lengths:
            past_length = mask.size(0) if mask is not None else 0
            qlen = length
            mlen = min(memory_size, self.max_memory_size)
            klen = qlen + mlen

            cur_attn_mask = torch.triu(input.new_ones(qlen, klen), diagonal=1 + mlen)

            # for the rest of the past sequence: don't look at them
            if mlen < memory_size:
                no_attn_mask = input.new_ones(qlen, memory_size - mlen)
                cur_attn_mask = torch.cat([no_attn_mask, cur_attn_mask], dim=1)

            if mask is not None:
                prev_q, prev_k = mask.size(0), mask.size(1)
                # the past doesn't look at future
                prev_mask = input.new_ones(prev_q, qlen)
                mask = torch.cat([mask, prev_mask], dim=1)  # first, concatenate for the K dim
                mask = torch.cat([mask, cur_attn_mask], dim=0)  # concatenate for the Q dim
            else:
                mask = cur_attn_mask

            memory_size = mask.size(1)

        mask = mask.bool().unsqueeze(-1)

    return mask
# TODO: merging forward_stream and forward
# TODO: write a step function for encoder
def forward(self, input, context, src, input_pos=None, tgt_lang=None, streaming=False, **kwargs):
    """
    Inputs Shapes:
        input: (Variable) batch_size x len_tgt
        context: (Variable) src_len x batch_size x d_model (encoder states)
        src: source tensor used to build the encoder-attention pad mask
    Outputs Shapes:
        defaultdict with 'hidden' (len_tgt x batch_size x d_model), 'coverage',
        'context' and (when streaming) 'streaming_state'
    """

    """ Embedding: batch_size x len_tgt x d_model """
    input = input.transpose(0, 1)  # T x B
    emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
    emb = emb * math.sqrt(self.model_size)

    if streaming:
        src_lengths = kwargs.get("src_lengths", None)
        tgt_lengths = kwargs.get("tgt_lengths", None)
        streaming_state = kwargs.get("streaming_state")
        mems = streaming_state.tgt_mems

        extra_context = streaming_state.extra_context
        extra_context_length = extra_context.size(0) if extra_context is not None else 0

        # length of the cached target memory (0 on the first segment)
        mem_len = mems[0].size(0) if mems is not None else 0
    else:
        mem_len = 0
        mems = None
        extra_context = None

    if self.use_language_embedding:
        lang_emb = self.language_embeddings(tgt_lang)  # B x H or 1 x H
        if self.language_embedding_type == 'sum':
            emb = emb + lang_emb
        elif self.language_embedding_type == 'concat':
            # replace the bos embedding with the language
            bos_emb = lang_emb.expand_as(emb[0])
            emb[0] = bos_emb

            lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
            concat_emb = torch.cat([emb, lang_emb], dim=-1)
            emb = torch.relu(self.projector(concat_emb))
        else:
            raise NotImplementedError

    if context is not None:
        if self.encoder_type == "audio":
            if not self.encoder_cnn_downsampling:
                mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
            else:
                # CNN downsampled time by 4; subsample the mask accordingly
                long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
                mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
        else:
            if streaming:
                context_attn_mask = self.create_context_mask(input, src,
                                                             src_lengths, tgt_lengths,
                                                             extra_context_length)
                mask_src = context_attn_mask.unsqueeze(0)
            else:
                mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
    else:
        mask_src = None

    qlen = input.size(0)
    klen = qlen + mem_len

    # preparing self-attention mask. The input is either left or right aligned
    if streaming:
        dec_attn_mask = self.create_self_attn_mask(input, tgt_lengths, mem_len)
    else:
        dec_attn_mask = torch.triu(
            emb.new_ones(qlen, klen), diagonal=1 + mem_len).byte()[:, :, None]
        # pad_mask = input.eq(onmt.constants.PAD).byte()  # L x B
        #
        # dec_attn_mask = dec_attn_mask + pad_mask.unsqueeze(0)
        # dec_attn_mask = dec_attn_mask.gt(0)
        dec_attn_mask = dec_attn_mask.bool()

    pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)

    pos_emb = self.positional_encoder(pos, bsz=input.size(1) if self.fast_self_attn else None)

    output = self.preprocess_layer(emb.contiguous())

    if streaming:
        hids = [output]
        if extra_context is not None:
            # prepend the carried-over source context from the previous segment
            context = torch.cat([extra_context, context], dim=0)

    pos_emb = self.preprocess_layer(pos_emb)

    if self.reversible:
        # reversible layers operate on a doubled hidden dimension
        output = torch.cat([output, output], dim=-1)

        output = ReversibleDecoderFunction.apply(output, pos_emb, context, self.layer_modules,
                                                 dec_attn_mask, mask_src)
        coverage = None
    else:
        for i, layer in enumerate(self.layer_modules):
            mems_i = mems[i] if mems is not None and streaming and \
                self.stream_context in ['local', 'global'] and self.max_memory_size > 0 else None
            output, coverage, _ = layer(output, context, pos_emb, dec_attn_mask, mask_src, mems=mems_i)

            if streaming:
                hids.append(output)

    # From Google T2T
    # if normalization is done in layer_preprocess, then it should also be done
    # on the output, since the output can grow very large, being the sum of
    # a whole stack of unnormalized layer outputs.
    output = self.postprocess_layer(output)

    output_dict = {'hidden': output, 'coverage': coverage, 'context': context}
    output_dict = defaultdict(lambda: None, output_dict)

    if streaming:
        # if we use the extra context: keep the last context
        if self.extra_context_size > 0:
            extra_context = context[-self.extra_context_size:].detach()
            streaming_state.extra_context = extra_context

        if self.stream_context in ['local', 'global']:
            streaming_state.update_tgt_mems(hids, qlen)

        output_dict['streaming_state'] = streaming_state

    return output_dict
def step(self, input, decoder_state, streaming=False):
    """Generate the decoder hidden state for a single decoding step.

    :param input: (Tensor) target token indices decoded so far, batch_size x len_tgt
    :param decoder_state: DecoderState holding the encoder context, per-layer
        attention buffers and decoding flags (``buffering``, ``concat_input_seq``)
    :param streaming: if True, delegate to :meth:`step_streaming`
    :return: defaultdict with keys 'hidden' (1 x batch x d_model — only the last
        time step), 'coverage' (attention of the last layer) and 'context'
    """
    if streaming:
        return self.step_streaming(input, decoder_state)
    context = decoder_state.context
    buffers = decoder_state.attention_buffers
    lang = decoder_state.tgt_lang
    buffering = decoder_state.buffering
    if decoder_state.concat_input_seq:
        # The search loop feeds only the newest token; accumulate the full
        # target prefix inside the decoder state.
        if decoder_state.input_seq is None:
            decoder_state.input_seq = input
        else:
            # concatenate the last input to the previous input sequence
            decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
        input = decoder_state.input_seq.transpose(0, 1)  # B x T
        # NOTE(review): src is only bound on this path — presumably every caller
        # that provides a context also sets concat_input_seq; verify.
        src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
    if buffering:
        # With buffering, only the newest token is embedded/decoded; earlier
        # keys/values live in the per-layer incremental caches.
        # use the last value of input to continue decoding
        if input.size(1) > 1:
            input_ = input[:, -1].unsqueeze(1).transpose(0, 1)
        else:
            input_ = input.transpose(0, 1)
    else:
        input_ = input.transpose(0, 1)  # from B x T to T x B
    """ Embedding: batch_size x 1 x d_model """
    emb = self.word_lut(input_) * math.sqrt(self.model_size)
    input = input.transpose(0, 1)
    klen = input.size(0)  # key length = full target prefix length
    if self.use_language_embedding:
        lang_emb = self.language_embeddings(lang)  # B x H
        if self.language_embedding_type in ['sum', 'all_sum']:
            emb = emb + lang_emb
        elif self.language_embedding_type == 'concat':
            if input.size(0) == 1:
                # very first step: overwrite the BOS embedding with the language embedding
                emb[0] = lang_emb
            lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
            concat_emb = torch.cat([emb, lang_emb], dim=-1)
            emb = torch.relu(self.projector(concat_emb))
        else:
            raise NotImplementedError
    # prepare position encoding: descending distances for klen positions
    qlen = emb.size(0)
    mlen = klen - qlen  # number of positions already cached
    pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
    pos_emb = self.positional_encoder(pos)
    if not buffering:
        # causal (upper-triangular) mask over the whole prefix; when buffering,
        # the single query may attend to every cached position, so no mask.
        dec_attn_mask = torch.triu(
            emb.new_ones(qlen, klen), diagonal=1 + mlen).byte()[:, :, None]
        if onmt.constants.torch_version >= 1.2:
            dec_attn_mask = dec_attn_mask.bool()
    else:
        dec_attn_mask = None
    if context is not None:
        if self.encoder_type == "audio":
            if not self.encoder_cnn_downsampling:
                # audio input: feature channel 0 carries the padding marker
                mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
            else:
                # CNN downsampling shrank time by 4x: subsample the mask to match
                long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
                mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
        else:
            mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
    else:
        mask_src = None
    output = emb.contiguous()
    if self.reversible:
        # reversible layers carry two parallel streams
        output_1, output_2 = output, output
    for i, layer in enumerate(self.layer_modules):
        buffer = buffers[i] if i in buffers else None
        if self.reversible:
            if buffering:
                output_1, output_2, coverage, buffer = layer(output_1, output_2, pos_emb, context,
                                                             dec_attn_mask, mask_src, incremental=True,
                                                             incremental_cache=buffer)
                decoder_state.update_attention_buffer(buffer, i)
            else:
                output_1, output_2, coverage, _ = layer(output_1, output_2, pos_emb, context,
                                                        dec_attn_mask, mask_src)
        else:
            if buffering:
                output, coverage, buffer = layer(output, context, pos_emb, dec_attn_mask, mask_src,
                                                 incremental=True, incremental_cache=buffer)
                decoder_state.update_attention_buffer(buffer, i)
            else:
                output, coverage, _ = layer(output, context, pos_emb, dec_attn_mask, mask_src)
    if self.reversible:
        output = output_1 + output_2
    # normalize and take the last time step
    output = self.postprocess_layer(output)
    output = output[-1].unsqueeze(0)
    output_dict = defaultdict(lambda: None)
    output_dict['hidden'] = output
    output_dict['coverage'] = coverage  # coverage of the last layer only
    output_dict['context'] = context
    return output_dict
def step_streaming(self, input, decoder_state):
    """Single decoding step in the streaming case.

    Decodes exactly one new target token while attending to the streamed
    context and the persistent target memory kept in
    ``decoder_state.streaming_state``.

    :param input: (Tensor) newest target token indices
    :param decoder_state: StreamDecodingState with context and streaming buffers
    :return: defaultdict with 'hidden', 'coverage', 'context' and 'streaming_state'
    """
    context = decoder_state.context
    lang = decoder_state.tgt_lang
    streaming_state = decoder_state.streaming_state
    # for global model: push the context in
    if decoder_state.concat_input_seq:
        if decoder_state.input_seq is None:
            decoder_state.input_seq = input
        else:
            # concatenate the last input to the previous input sequence
            decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
        input = decoder_state.input_seq.transpose(0, 1)  # B x T
        # NOTE(review): src is only bound on this path; used below for the
        # context mask — presumably concat_input_seq is always True here. Verify.
        src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
    # use the last value of input to continue decoding
    if input.size(1) > 1:
        input_ = input[:, -1].unsqueeze(1).transpose(0, 1)
    else:
        input_ = input.transpose(0, 1)
    emb = self.word_lut(input_) * math.sqrt(self.model_size)
    input = input.transpose(0, 1)  # B x T to T x B
    klen = input.size(0)
    # If we start a new sentence to decode: reset the context memory
    if klen == 1:
        streaming_state.reset_context_memory()
    if self.use_language_embedding:
        lang_emb = self.language_embeddings(lang)  # B x H or 1 x H
        if self.language_embedding_type == 'sum':
            emb = emb + lang_emb
        elif self.language_embedding_type == 'concat':
            # replace the bos embedding with the language embedding
            bos_emb = lang_emb.expand_as(emb[0])
            emb[0] = bos_emb
            lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
            concat_emb = torch.cat([emb, lang_emb], dim=-1)
            emb = torch.relu(self.projector(concat_emb))
        else:
            raise NotImplementedError
    # need to manually define src_lengths and tgt_lengths here
    src_lengths = torch.LongTensor([context.size(0)])
    tgt_lengths = torch.LongTensor([1])
    if context is not None:
        context_attn_mask = self.create_context_mask(input, src, src_lengths, tgt_lengths)
        context_attn_mask = context_attn_mask.unsqueeze(0)
    else:
        context_attn_mask = None
    dec_attn_mask = self.create_self_attn_mask(input, tgt_lengths, streaming_state.prev_tgt_mem_size)
    # only the newest query row of the self-attention mask is needed
    dec_attn_mask = dec_attn_mask[:, -1:, :]
    klen = 1 + streaming_state.prev_tgt_mem_size
    pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
    pos_emb = self.positional_encoder(pos)
    output = emb
    for i, layer in enumerate(self.layer_modules):
        # T x B x d_model
        buffer = streaming_state.tgt_buffer[i]
        # reuse_source is True here because the (streamed) context can be reused
        reuse_source = True
        output, coverage, buffer = layer(output, context, pos_emb, dec_attn_mask, context_attn_mask,
                                         incremental=True, incremental_cache=buffer, reuse_source=reuse_source)
        streaming_state.tgt_buffer[i] = buffer
    output = self.postprocess_layer(output)
    # one more target position is now cached; prune the memory to its cap
    streaming_state.prev_tgt_mem_size += 1
    streaming_state.prune_target_memory(self.max_memory_size)
    # NOTE(review): extra_context is computed but never used in this method
    extra_context = context[-self.extra_context_size:].detach()
    output_dict = defaultdict(lambda: None, {'hidden': output, 'coverage': coverage, 'context': context})
    output_dict['streaming_state'] = streaming_state
    return output_dict
class RelativeTransformer(Transformer):
    """Transformer variant whose decoder-state creation and stepping support
    streaming decoding (persistent cross-segment memory via StreamState)."""

    def create_decoder_state(self, batch, beam_size=1, type=1, streaming=False, previous_decoding_state=None,
                             factorize=True,
                             pretrained_layer_states=None, **kwargs):
        """
        Generate a new decoder state based on the batch input
        :param factorize:
        :param pretrained_layer_states:
        :param previous_decoding_state: state from the previous stream segment, if any
        :param streaming: create a StreamDecodingState instead of a plain one
        :param type:
        :param batch: Batch object (may not contain target during decoding)
        :param beam_size: Size of beam used in beam search
        :return: TransformerDecodingState or StreamDecodingState
        """
        # in this case batch size should be 1
        src = batch.get('source')
        src_pos = batch.get('source_pos')
        src_lang = batch.get('source_lang')
        tgt_lang = batch.get('target_lang')
        src_lengths = batch.src_lengths
        src_transposed = src.transpose(0, 1)
        if previous_decoding_state is None:
            # if the previous stream is None (the first segment in the stream)
            # then proceed normally like normal translation
            # init a new stream state
            streaming_state = self.init_stream() if streaming else None
            encoder_output = self.encoder(src_transposed, input_pos=src_pos,
                                          input_lang=src_lang, src_lengths=src_lengths,
                                          streaming=streaming, streaming_state=streaming_state,
                                          factorize=factorize, pretrained_layer_states=pretrained_layer_states)
            if streaming:
                decoder_state = StreamDecodingState(src, tgt_lang, encoder_output['context'],
                                                    encoder_output['src_mask'],
                                                    beam_size=beam_size, model_size=self.model_size, type=type,
                                                    cloning=True, streaming_state=streaming_state)
            else:
                decoder_state = TransformerDecodingState(src, tgt_lang, encoder_output['context'],
                                                         encoder_output['src_mask'],
                                                         beam_size=beam_size, model_size=self.model_size, type=type)
        else:
            # continuing a stream: reuse the previous segment's streaming state
            streaming_state = previous_decoding_state.streaming_state
            # to have the same batch/beam size with the previous memory ..
            src_transposed = src_transposed.repeat(beam_size, 1)
            src = src.repeat(1, beam_size)
            encoder_output = self.encoder(src_transposed, input_pos=src_pos,
                                          input_lang=src_lang, src_lengths=src_lengths,
                                          streaming=True, streaming_state=streaming_state)
            context = encoder_output['context']
            if self.decoder.extra_context_size > 0:
                # prepend the tail of the previous segment's context (and source)
                # so cross-attention can look slightly into the past
                prev_context = previous_decoding_state.context
                extra_context = prev_context[-self.decoder.extra_context_size:].detach()
                context = torch.cat([extra_context, context], dim=0)
                prev_src = previous_decoding_state.src[-self.decoder.extra_context_size:].detach()
                src = torch.cat([prev_src, src], dim=0)
            decoder_state = StreamDecodingState(src, tgt_lang, context,
                                                encoder_output['src_mask'],
                                                beam_size=beam_size, model_size=self.model_size, type=type,
                                                cloning=False, streaming_state=streaming_state)
        return decoder_state

    def init_stream(self):
        """Create a fresh StreamState on the model's device/dtype."""
        param = next(self.parameters())
        layers = self.decoder.layers
        streaming_state = StreamState(layers, self.decoder.max_memory_size, param.device, param.dtype)
        return streaming_state

    def step(self, input_t, decoder_state, streaming=False):
        """
        Decoding function:
        generate new decoder output based on the current input and current decoder state
        the decoder state is updated in the process
        :param streaming:
        :param input_t: the input word index at time t
        :param decoder_state: object DecoderState containing the buffers required for decoding
        :return: a dictionary containing: log-prob output and the attention coverage
        """
        output_dict = self.decoder.step(input_t, decoder_state, streaming=streaming)
        output_dict['src'] = decoder_state.src.transpose(0, 1)
        log_prob = self.generator[0](output_dict)['logits'].squeeze(0)
        # softmax in float for numerical stability under fp16
        log_prob = F.log_softmax(log_prob.float(), dim=-1)
        coverage = output_dict['coverage']
        # NOTE(review): assumes coverage is not None and has 3 dims (B x T x S)
        last_coverage = coverage[:, -1, :].squeeze(1)
        output_dict['log_prob'] = log_prob
        output_dict['coverage'] = last_coverage
        return output_dict

    def set_memory_size(self, src_memory_size, tgt_memory_size):
        """Set the streaming memory caps on encoder and decoder."""
        self.encoder.max_memory_size = src_memory_size
        self.decoder.max_memory_size = tgt_memory_size
class StreamState(object):
    """Persistent state carried across segments during streaming translation.

    Holds, per layer: incremental attention buffers for encoder ("src") and
    decoder ("tgt") sides, and — in training mode — Transformer-XL style
    memory tensors (one per layer plus one for the embedding output).
    """

    def __init__(self, nlayers, mem_len, device, dtype, training=True):
        # Per-layer incremental attention caches, lazily populated.
        self.src_buffer = defaultdict(lambda: None)
        self.tgt_buffer = defaultdict(lambda: None)
        # How many source/target positions are currently cached.
        self.prev_src_mem_size = 0
        self.prev_tgt_mem_size = 0
        self.src_lengths = []
        self.tgt_lengths = []
        self.training = training
        self.mem_len = mem_len
        self.nlayers = nlayers
        if self.training:
            # One initially-empty memory tensor per layer plus the embedding output.
            self.src_mems = [torch.empty(0, dtype=dtype, device=device)
                             for _ in range(self.nlayers + 1)]
            self.tgt_mems = [torch.empty(0, dtype=dtype, device=device)
                             for _ in range(self.nlayers + 1)]
        self.extra_context = None
        self.context_memory = None

    def prune_source_memory(self, mem_size):
        """Truncate every source-side buffer to its newest ``mem_size`` positions."""
        if mem_size >= self.prev_src_mem_size:
            return
        self.prev_src_mem_size = mem_size
        for layer_buf in self.src_buffer.values():
            if layer_buf is None:
                continue
            for key in layer_buf:
                layer_buf[key] = layer_buf[key][-mem_size:]

    def prune_target_memory(self, mem_size):
        """Truncate target-side buffers, leaving the enc-dec context cache intact."""
        if mem_size >= self.prev_tgt_mem_size:
            return
        self.prev_tgt_mem_size = mem_size
        for layer_buf in self.tgt_buffer.values():
            if layer_buf is None:
                continue
            for key in layer_buf:
                # 'c_k'/'c_v' cache the encoder context, which does not grow
                # with the target length — never prune them here.
                if key not in ['c_k', 'c_v']:
                    layer_buf[key] = layer_buf[key][-mem_size:]

    def get_beam_buffer(self, beam_id):
        """Select the target buffers of one beam hypothesis (batch dim is 1)."""
        selected = dict()
        for layer_id in self.tgt_buffer:
            layer_buf = self.tgt_buffer[layer_id]
            selected[layer_id] = {
                'v': layer_buf['v'].index_select(1, beam_id),
                'k': layer_buf['k'].index_select(1, beam_id),
            }
        return selected

    def set_beam_buffer(self, sent_states):
        """Rebuild the target buffers from per-hypothesis states, truncated to
        the shortest buffered length among the hypotheses."""
        template = self.tgt_buffer[0]['v']
        hidden_size = template.size(-1)
        beam_size = len(sent_states)
        mem_length = min(sent_states[b]['hidden_buffer'][0]['k'].size(0)
                         for b in range(beam_size))
        for layer_id in self.tgt_buffer:
            new_v = template.new(mem_length, beam_size, hidden_size).zero_()
            new_k = template.new(mem_length, beam_size, hidden_size).zero_()
            for b in range(beam_size):
                hyp_buf = sent_states[b]['hidden_buffer'][layer_id]
                new_v[:, b, :].copy_(hyp_buf['v'][-mem_length:, 0])
                new_k[:, b, :].copy_(hyp_buf['k'][-mem_length:, 0])
            self.tgt_buffer[layer_id]['v'] = new_v
            self.tgt_buffer[layer_id]['k'] = new_k

    def reset_context_memory(self):
        """Drop cached enc-dec keys/values; called when a new sentence starts."""
        for layer_buf in self.tgt_buffer.values():
            layer_buf.pop('c_k', None)
            layer_buf.pop('c_v', None)

    def reset_target_memory(self):
        """Drop cached target-side keys/values and reset the cached size."""
        for layer_buf in self.tgt_buffer.values():
            layer_buf.pop('k', None)
            layer_buf.pop('v', None)
        self.prev_tgt_mem_size = 0

    def update_src_mems(self, hids, qlen):
        """Append the new source hidden states to the XL memories (capped at mem_len)."""
        if self.src_mems is None:
            return None
        mlen = self.src_mems[0].size(0) if self.src_mems is not None else 0
        assert len(hids) == len(self.src_mems), 'len(hids) != len(mems)'
        # Cache the newest `mem_len` of the `mlen + qlen` available steps.
        with torch.no_grad():
            end_idx = mlen + qlen
            beg_idx = max(0, end_idx - self.mem_len)
            new_mems = [torch.cat([old, hid], dim=0)[beg_idx:end_idx].detach()
                        for old, hid in zip(self.src_mems, hids)]
            self.src_mems = new_mems

    def update_tgt_mems(self, hids, qlen):
        """Append the new target hidden states to the XL memories (capped at mem_len)."""
        if self.tgt_mems is None:
            return None
        mlen = self.tgt_mems[0].size(0) if self.tgt_mems is not None else 0
        assert len(hids) == len(self.tgt_mems), 'len(hids) != len(mems)'
        with torch.no_grad():
            end_idx = mlen + qlen
            beg_idx = max(0, end_idx - self.mem_len)
            new_mems = [torch.cat([old, hid], dim=0)[beg_idx:end_idx].detach()
                        for old, hid in zip(self.tgt_mems, hids)]
            self.tgt_mems = new_mems
class StreamDecodingState(DecoderState):
    """Decoder state for streaming translation: wraps a StreamState and tracks
    beam bookkeeping across segments."""

    # We need to somehow create the state w.r.t the previous states of the encoder and decoder
    def __init__(self, src, tgt_lang, context, src_mask, beam_size=1, model_size=512,
                 cloning=True, streaming_state=None, **kwargs):
        self.beam_size = beam_size
        self.model_size = model_size
        # src_mask argument is intentionally discarded (kept for interface parity)
        self.src_mask = None
        self.streaming_state = streaming_state
        bsz = src.size(1)  # expected to be 1 for streaming
        if cloning:
            # replicate each batch entry beam_size times: [0,..,0,1,..,1,...]
            new_order = torch.arange(bsz).view(-1, 1).repeat(1, self.beam_size).view(-1).to(src.device)
            self.context = context.index_select(1, new_order) if context is not None else None
            self.src = src.index_select(1, new_order)  # src is time-first
        else:
            self.context = context
            self.src = src
        self.concat_input_seq = False
        self.tgt_lang = tgt_lang
        # records which previous-beam hypothesis each current one comes from
        self.origin = torch.arange(self.beam_size).to(src.device)

    def get_beam_buffer(self, beam_id):
        # delegate to the underlying streaming state
        return self.streaming_state.get_beam_buffer(beam_id)

    def set_beam_buffer(self, sent_states):
        return self.streaming_state.set_beam_buffer(sent_states)

    def update_attention_buffer(self, buffer, layer):
        # dict of 2 keys (k, v): T x B x H
        self.attention_buffers[layer] = buffer

    # For the new decoder version only
    def _reorder_incremental_state(self, reorder_state):
        """Reorder every cached tensor along its batch/beam dimension."""
        if self.context is not None:
            self.context = self.context.index_select(1, reorder_state)
        if self.src_mask is not None:
            self.src_mask = self.src_mask.index_select(0, reorder_state)
        self.src = self.src.index_select(1, reorder_state)
        # both attention-buffer families are time-first, so dim 1 is the beam dim
        all_buffers = (list(self.streaming_state.src_buffer.values())
                       + list(self.streaming_state.tgt_buffer.values()))
        for buffer_ in all_buffers:
            if buffer_ is None:
                continue
            for key in buffer_.keys():
                if buffer_[key] is not None:
                    t_, br_, d_ = buffer_[key].size()
                    buffer_[key] = buffer_[key].index_select(1, reorder_state)
        if self.streaming_state.src_mems is not None:
            src_mems = self.streaming_state.src_mems
            for idx, mems in enumerate(src_mems):
                if mems.size(0) > 0:
                    src_mems[idx] = mems.index_select(1, reorder_state)
        if self.streaming_state.tgt_mems is not None:
            tgt_mems = self.streaming_state.tgt_mems
            for idx, mems in enumerate(tgt_mems):
                if mems.size(0) > 0:
                    tgt_mems[idx] = mems.index_select(1, reorder_state)
        if self.streaming_state.context_memory is not None:
            self.streaming_state.context_memory = \
                self.streaming_state.context_memory.index_select(1, reorder_state)
        self.origin = self.origin.index_select(0, reorder_state)

    def prune_complete_beam(self, active_idx, remaining_sents):
        pass

    def update_beam(self, beam, b, remaining_sents, idx):
        pass
| 49,047 | 40.11316 | 119 | py |
NMTGMinor | NMTGMinor-master/onmt/models/relative_transformer_layers.py | import torch
import torch.nn as nn
import onmt
from onmt.models.transformer_layers import PrePostProcessing, MultiHeadAttention, Linear
from onmt.modules.relative_attention import RelPartialLearnableMultiHeadAttn
from onmt.modules.optimized.relative_self_attention import RelativeSelfMultiheadAttn
from onmt.utils import flip
from onmt.modules.bottle import Bottle
from onmt.modules.linear import XavierLinear as Linear
from onmt.modules.linear import XavierLinear
from onmt.modules.linear import group_linear, FeedForwardSwish, FeedForward
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.dropout import VariationalDropout
from onmt.modules.relative_attention import RelPartialLearnableMultiHeadAttn
from onmt.modules.optimized.encdec_attention import EncdecMultiheadAttn
from onmt.modules.optimized.feed_forward import PositionWiseFeedForward
class RelativeTransformerEncoderLayer(nn.Module):
    """Transformer-XL style encoder layer: relative-position self-attention
    followed by a position-wise feed-forward block, with optional variational
    dropout and stochastic depth (``death_rate``).

    :param opt: option namespace (model_size, n_heads, dropout, attn_dropout,
        inner_size, variational_dropout, fast_self_attention, fast_feed_forward)
    :param death_rate: probability of skipping this layer during training
    """

    # def __init__(self, h, d_model, p, d_ff, attn_p=0.1, variational=False, death_rate=0.0, **kwargs):
    def __init__(self, opt, death_rate=0.0, **kwargs):
        super(RelativeTransformerEncoderLayer, self).__init__()
        self.variational = opt.variational_dropout
        self.death_rate = death_rate
        self.fast_self_attention = opt.fast_self_attention
        self.preprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
        self.postprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
                                                  variational=self.variational)
        self.preprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
        self.postprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
                                                 variational=self.variational)
        d_head = opt.model_size // opt.n_heads
        if not self.fast_self_attention:
            self.multihead = RelPartialLearnableMultiHeadAttn(opt.n_heads, opt.model_size,
                                                              d_head, dropatt=opt.attn_dropout)
        else:
            self.multihead = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout)
        # (fix: removed a leftover debug `print(opt.fast_feed_forward)` here)
        if not opt.fast_feed_forward:
            feedforward = FeedForward(opt.model_size, opt.inner_size, opt.dropout, variational=self.variational)
            self.feedforward = Bottle(feedforward)
        else:
            self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
                                                       variational=self.variational)

    def forward(self, input, pos_emb, attn_mask, incremental=False, incremental_cache=None, mems=None):
        """Run self-attention + feed-forward with pre-norm residuals.

        :param input: hidden states (time-first, per the XL-style attention modules)
        :param pos_emb: relative position embeddings
        :param attn_mask: attention mask
        :param incremental: enable incremental decoding caching
        :param incremental_cache: cache dict (created on demand)
        :param mems: Transformer-XL memory tensor, may be empty
        :return: output states, plus the cache when ``incremental`` is True
        """
        if incremental and incremental_cache is None:
            incremental_cache = dict()
        coin = True
        if self.training and self.death_rate > 0:
            # stochastic depth: skip the whole layer with probability death_rate
            coin = (torch.rand(1)[0].item() >= self.death_rate)
        if coin:
            # memory for transformer-xl caching (normalized like the query)
            if mems is not None and mems.size(0) > 0:
                mems = self.preprocess_attn(mems)
            else:
                mems = None
            query = self.preprocess_attn(input)
            if not self.fast_self_attention:
                out, _, incremental_cache = self.multihead(query, pos_emb, attn_mask=attn_mask, mems=mems,
                                                           incremental=incremental, incremental_cache=incremental_cache)
            else:
                out, _ = self.multihead(query, pos_emb, attn_mask, None, mems=mems,
                                        incremental=incremental, incremental_cache=incremental_cache)
            # rescaling before residual keeps the expectation unchanged
            if self.training and self.death_rate > 0:
                out = out / (1 - self.death_rate)
            input = self.postprocess_attn(out, input)
            """ Feed forward layer
                layernorm > ffn > dropout > residual
            """
            out = self.feedforward(self.preprocess_ffn(input))
            # rescaling before residual
            if self.training and self.death_rate > 0:
                out = out / (1 - self.death_rate)
            input = self.postprocess_ffn(out, input)
        if incremental:
            return input, incremental_cache
        return input
class RelativeTransformerDecoderLayer(nn.Module):
    """Transformer-XL style decoder layer: relative-position self-attention,
    optional encoder-decoder attention (disabled via ``ignore_source``), and a
    feed-forward block, with stochastic depth (``death_rate``)."""

    # def __init__(self, h, d_model, p, d_ff, attn_p=0.1, version=1.0, ignore_source=False,
    #              variational=False, death_rate=0.0):
    def __init__(self, opt, death_rate=0.0):
        super(RelativeTransformerDecoderLayer, self).__init__()
        self.ignore_source = opt.ignore_source  # decoder-only mode: no enc-dec attention
        self.variational = opt.variational_dropout
        self.death_rate = death_rate  # stochastic-depth drop probability
        self.fast_self_attention = opt.fast_self_attention
        self.fast_xattention = opt.fast_xattention
        # self.lfv_multilingual = opt.lfv_multilingual
        self.preprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
        self.postprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
                                                  variational=self.variational)
        if not self.ignore_source:
            self.preprocess_src_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
            self.postprocess_src_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
                                                          variational=self.variational)
            if opt.fast_xattention:
                self.multihead_src = EncdecMultiheadAttn(opt.n_heads, opt.model_size, opt.attn_dropout)
            else:
                self.multihead_src = MultiHeadAttention(opt.n_heads, opt.model_size, attn_p=opt.attn_dropout, share=2)
        self.preprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
        self.postprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
                                                 variational=self.variational)
        d_head = opt.model_size // opt.n_heads
        if not self.fast_self_attention:
            self.multihead_tgt = RelPartialLearnableMultiHeadAttn(opt.n_heads, opt.model_size, d_head,
                                                                  dropatt=opt.attn_dropout)
        else:
            self.multihead_tgt = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout)
        # NOTE(review): unlike the encoder layer, the fast feed-forward variant is
        # disabled here (left commented out) — the decoder always uses FeedForward.
        # if not opt.fast_feed_forward:
        #     feedforward = FeedForward(opt.model_size, opt.inner_size, opt.dropout, variational=self.variational)
        #     self.feedforward = Bottle(feedforward)
        # else:
        #     self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
        #                                                variational=self.variational)
        feedforward = FeedForward(opt.model_size, opt.inner_size, opt.dropout, variational=self.variational)
        self.feedforward = Bottle(feedforward)
        # if opt.lfv_multilingual:
        #     self.lid_net = lid_net
        #     self.lfv_mapper = nn.Linear(opt.bottleneck_size, opt.model_size)
        # else:
        #     self.lid_net = None
        #     self.lfv_mapper = None

    # def forward(self, input, context, pos_emb, r_w_bias, r_r_bias, mask_tgt, mask_src):
    def forward(self, input, context, pos_emb, mask_tgt, mask_src,
                incremental=False, incremental_cache=None, reuse_source=True, mems=None):
        """ Self attention layer
            layernorm > attn > dropout > residual
        """
        if incremental and incremental_cache is None:
            incremental_cache = dict()
        coin = True
        if self.training and self.death_rate > 0:
            # stochastic depth: skip this layer entirely with probability death_rate
            coin = (torch.rand(1)[0].item() >= self.death_rate)
        if coin:
            # input and context should be time first ?
            if mems is not None and mems.size(0) > 0:
                mems = self.preprocess_attn(mems)
            else:
                mems = None
            query = self.preprocess_attn(input)
            if self.fast_self_attention:
                out, _ = self.multihead_tgt(query, pos_emb, None, mask_tgt, mems=mems,
                                            incremental=incremental, incremental_cache=incremental_cache)
            else:
                out, _, incremental_cache = self.multihead_tgt(query, pos_emb, attn_mask=mask_tgt, mems=mems,
                                                               incremental=incremental,
                                                               incremental_cache=incremental_cache)
            # rescaling before residual keeps the expectation unchanged
            if self.training and self.death_rate > 0:
                out = out / (1 - self.death_rate)
            input = self.postprocess_attn(out, input)
            """ Context Attention layer
                layernorm > attn > dropout > residual
            """
            if not self.ignore_source:
                query = self.preprocess_src_attn(input)
                # only cache the source keys/values when they can be reused
                incremental_source = incremental and reuse_source
                out, coverage = self.multihead_src(query, context, context, mask_src,
                                                   incremental=incremental_source,
                                                   incremental_cache=incremental_cache)
                # rescaling before residual
                if self.training and self.death_rate > 0:
                    out = out / (1 - self.death_rate)
                input = self.postprocess_src_attn(out, input)
            else:
                coverage = None
            """ Feed forward layer
                layernorm > ffn > dropout > residual
            """
            out = self.feedforward(self.preprocess_ffn(input))
            # rescaling before residual
            if self.training and self.death_rate > 0:
                out = out / (1 - self.death_rate)
            input = self.postprocess_ffn(out, input)
        else:
            # layer skipped: pass input through untouched, no attention coverage
            coverage = None
        return input, coverage, incremental_cache
| 10,207 | 44.775785 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/lstm.py | import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from collections import defaultdict
import math
import onmt
from onmt.modules.base_seq2seq import NMTModel, DecoderState
from onmt.models.transformer_layers import PrePostProcessing
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.optimized.encdec_attention import EncdecMultiheadAttn
from onmt.modules.dropout import embedded_dropout, switchout
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
import random
import time
class SpeechLSTMEncoder(nn.Module):
    """Bidirectional LSTM encoder over audio features.

    The raw features are projected to ``model_size`` either by a linear layer
    or by a 2-layer strided CNN (``opt.cnn_downsampling``, 4x time reduction),
    then fed through a (possibly weight-dropped / language-factorized) LSTM.
    """

    def __init__(self, opt, embedding, encoder_type='audio'):
        super(SpeechLSTMEncoder, self).__init__()
        self.opt = opt
        self.model_size = opt.model_size
        # allow a dedicated encoder depth; fall back to the shared layer count
        if hasattr(opt, 'encoder_layers') and opt.encoder_layers != -1:
            self.layers = opt.encoder_layers
        else:
            self.layers = opt.layers
        self.dropout = opt.dropout
        self.word_dropout = opt.word_dropout
        self.attn_dropout = opt.attn_dropout
        self.emb_dropout = opt.emb_dropout
        self.input_type = encoder_type
        self.cnn_downsampling = opt.cnn_downsampling
        self.switchout = 0.0  # for speech it has to be 0 (no token switchout on audio)
        self.varitional_dropout = opt.variational_dropout
        self.use_language_embedding = opt.use_language_embedding
        self.language_embedding_type = opt.language_embedding_type
        self.time = opt.time
        self.lsh_src_attention = opt.lsh_src_attention
        self.reversible = opt.src_reversible
        self.multilingual_factorized_weights = opt.multilingual_factorized_weights
        self.mfw_rank = opt.mfw_rank
        feature_size = opt.input_size
        self.channels = 1
        if opt.upsampling:
            feature_size = feature_size // 4
        if not self.cnn_downsampling:
            # simple linear projection of the audio features
            self.audio_trans = nn.Linear(feature_size, self.model_size)
            torch.nn.init.xavier_uniform_(self.audio_trans.weight)
        else:
            # two stride-2 convolutions: 4x downsampling in time and frequency
            channels = self.channels
            cnn = [nn.Conv2d(channels, 32, kernel_size=(3, 3), stride=2), nn.ReLU(True), nn.BatchNorm2d(32),
                   nn.Conv2d(32, 32, kernel_size=(3, 3), stride=2), nn.ReLU(True), nn.BatchNorm2d(32)]
            feat_size = (((feature_size // channels) - 3) // 4) * 32
            self.audio_trans = nn.Sequential(*cnn)
            self.linear_trans = nn.Linear(feat_size, self.model_size)
        self.unidirect = False
        self.rnn = nn.LSTM(input_size=self.model_size, hidden_size=self.model_size, num_layers=self.layers,
                           bidirectional=(not self.unidirect), bias=True, dropout=self.dropout, batch_first=True)
        if self.multilingual_factorized_weights:
            # per-language low-rank factorized LSTM weights
            from onmt.modules.weight_control_lstm import WeightFactoredLSTM
            self.rnn = WeightFactoredLSTM(self.rnn, dropout=opt.weight_drop, n_languages=opt.n_languages,
                                          rank=self.mfw_rank, multiplicative=opt.mfw_multiplicative,
                                          activation=opt.mfw_activation)
        elif opt.weight_drop > 0:
            # DropConnect on the recurrent (hidden-to-hidden) weights
            from onmt.modules.weight_control_lstm import WeightDrop
            weight_list = list()
            for i in range(self.layers):
                weight_list.append('weight_hh_l%d' % i)
                weight_list.append('weight_hh_l%d_reverse' % i)
            self.rnn = WeightDrop(self.rnn, weight_list, dropout=opt.weight_drop)
        self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d',
                                                  variational=self.varitional_dropout)
        self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')

    def rnn_fwd(self, seq, mask, hid, src_lang=None):
        """Run the LSTM, packing the sequence when a padding mask is available.

        :param src_lang: language indices for the factorized-weight LSTM
        :param seq: (Tensor) B x T x H input features
        :param mask: (Tensor or None) B x T validity mask (1 = real frame)
        :param hid: initial hidden state (or None)
        :return: (seq, hid) — padded outputs and final hidden state
        """
        if mask is not None:
            # pack so padded frames do not pollute the recurrent state
            lengths = mask.sum(-1).float().cpu()
            seq = pack_padded_sequence(seq, lengths, batch_first=True, enforce_sorted=False)
            if self.multilingual_factorized_weights:
                seq, hid = self.rnn(seq, hid, indices=src_lang)
            else:
                seq, hid = self.rnn(seq, hid)
            seq = pad_packed_sequence(seq, batch_first=True)[0]
        else:
            if self.multilingual_factorized_weights:
                seq, hid = self.rnn(seq, hid, indices=src_lang)
            else:
                seq, hid = self.rnn(seq, hid)
        return seq, hid

    def forward(self, input, input_pos=None, input_lang=None, hid=None,
                return_states=False, pretrained_layer_states=None, **kwargs):
        """Encode audio features.

        :param input: (Tensor) B x T x F features; channel 0 is the padding marker
        :param input_lang: language indices (used by factorized weights)
        :param hid: optional initial LSTM state
        :param return_states: additionally return per-layer states
        :param pretrained_layer_states: states to be added to the output (distillation/adaptation)
        :return: dict with 'context' (T x B x H) and 'src_mask' (True = padded)
        """
        if not self.cnn_downsampling:
            mask_src = input.narrow(2, 0, 1).squeeze(2).gt(onmt.constants.PAD)
            dec_attn_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
            input = input.narrow(2, 1, input.size(2) - 1)
            emb = self.audio_trans(input.contiguous().view(-1, input.size(2))).view(input.size(0),
                                                                                    input.size(1), -1)
            emb = emb.type_as(input)
        else:
            long_mask = input.narrow(2, 0, 1).squeeze(2).gt(onmt.constants.PAD)
            input = input.narrow(2, 1, input.size(2) - 1)
            # first resizing to fit the CNN format: B x channels x T x F
            input = input.view(input.size(0), input.size(1), -1, self.channels)
            input = input.permute(0, 3, 1, 2)
            input = self.audio_trans(input)
            input = input.permute(0, 2, 1, 3).contiguous()
            input = input.view(input.size(0), input.size(1), -1)
            input = self.linear_trans(input)
            # CNN reduced time by 4x: subsample the mask to match
            mask_src = long_mask[:, 0:input.size(1) * 4:4]
            dec_attn_mask = ~mask_src
            # the size seems to be B x T ?
            emb = input
        layer_states = dict()
        seq, hid = self.rnn_fwd(emb, mask_src, hid, src_lang=input_lang)
        if not self.unidirect:
            # merge the two directions by summation instead of concatenation
            hidden_size = seq.size(2) // 2
            seq = seq[:, :, :hidden_size] + seq[:, :, hidden_size:]
        if return_states:
            layer_states[0] = seq
        # Summing the context with externally provided (pretrained) states
        if pretrained_layer_states is not None:
            seq = seq + pretrained_layer_states[0]
        # layer norm
        seq = self.postprocess_layer(seq)
        output_dict = {'context': seq.transpose(0, 1), 'src_mask': dec_attn_mask}
        if return_states:
            output_dict['layer_states'] = layer_states
        return output_dict
class SpeechLSTMDecoder(nn.Module):
def __init__(self, opt, embedding, language_embeddings=None, **kwargs):
    """LSTM decoder with single-head encoder-decoder attention.

    :param opt: option namespace (model_size, layers, dropout settings,
        multilingual_factorized_weights, fast_xattention, ...)
    :param embedding: target word embedding (used as ``word_lut``)
    :param language_embeddings: optional language embedding table
    """
    super(SpeechLSTMDecoder, self).__init__()
    # Keep for reference
    # Define layers
    self.model_size = opt.model_size
    self.layers = opt.layers
    self.dropout = opt.dropout
    self.word_dropout = opt.word_dropout
    self.attn_dropout = opt.attn_dropout
    self.emb_dropout = opt.emb_dropout
    self.variational_dropout = opt.variational_dropout
    self.multilingual_factorized_weights = opt.multilingual_factorized_weights
    self.mfw_rank = opt.mfw_rank
    self.encoder_type = opt.encoder_type
    self.n_languages = opt.n_languages
    self.lstm = nn.LSTM(self.model_size, self.model_size, self.layers, dropout=self.dropout, batch_first=True)
    if self.multilingual_factorized_weights:
        # per-language low-rank factorized LSTM weights
        # NOTE(review): unlike the encoder, multiplicative/activation options
        # are not forwarded here — confirm whether that is intentional.
        from onmt.modules.weight_control_lstm import WeightFactoredLSTM
        self.lstm = WeightFactoredLSTM(self.lstm, dropout=opt.weight_drop, n_languages=opt.n_languages,
                                       rank=self.mfw_rank)
    elif opt.weight_drop > 0:
        from onmt.modules.weight_control_lstm import WeightDrop
        # todo: change so that dropout applied on all layers
        weight_list = list()
        for i in range(self.layers):
            weight_list.append('weight_hh_l%d' % i)
        self.lstm = WeightDrop(self.lstm, weight_list, dropout=opt.weight_drop)
    self.fast_xattention = opt.fast_xattention
    self.n_head = 1  # fixed to always use 1 head
    # also fix attention dropout to 0.0
    if self.multilingual_factorized_weights:
        self.fast_xattention = True
        from onmt.modules.multilingual_factorized.encdec_attention import MFWEncdecMultiheadAttn
        self.multihead_tgt = MFWEncdecMultiheadAttn(self.n_head, opt.model_size, 0.0, n_languages=opt.n_languages,
                                                    rank=opt.mfw_rank, weight_drop=0.0)
    else:
        if opt.fast_xattention:
            self.multihead_tgt = EncdecMultiheadAttn(self.n_head, opt.model_size, 0.0)
        else:
            self.multihead_tgt = MultiHeadAttention(self.n_head, opt.model_size, attn_p=0.0, share=3)
    # embedding dropout / final layer norm / pre-attention layer norm
    self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d',
                                              variational=self.variational_dropout)
    self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')
    self.preprocess_attn = PrePostProcessing(self.model_size, 0, sequence='n')
    self.word_lut = embedding
    self.encoder_cnn_downsampling = opt.cnn_downsampling
    self.language_embeddings = language_embeddings
    self.use_language_embedding = opt.use_language_embedding
    self.language_embedding_type = opt.language_embedding_type
    if self.language_embedding_type == 'concat':
        # projects [word_emb ; lang_emb] back to model_size
        self.projector = nn.Linear(opt.model_size * 2, opt.model_size)
    print("* Create LSTM Decoder with %d layers." % self.layers)
def process_embedding(self, input, input_lang=None):
return input
def step(self, input, decoder_state, **kwargs):
context = decoder_state.context
buffer = decoder_state.lstm_buffer
attn_buffer = decoder_state.attention_buffers
hid = buffer["hidden_state"]
cell = buffer["cell_state"]
tgt_lang = decoder_state.tgt_lang
buffering = decoder_state.buffering
if hid is not None:
hid_cell = (hid, cell)
else:
hid_cell = None
lang = decoder_state.tgt_lang
if decoder_state.concat_input_seq:
if decoder_state.input_seq is None:
decoder_state.input_seq = input
else:
# concatenate the last input to the previous input sequence
decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
input = decoder_state.input_seq.transpose(0, 1)
src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
if input.size(1) > 1:
input_ = input[:, -1].unsqueeze(1)
else:
input_ = input
emb = self.word_lut(input_)
emb = emb * math.sqrt(self.model_size)
if self.use_language_embedding:
# print("Using language embedding")
lang_emb = self.language_embeddings(lang) # B x H or 1 x H
if self.language_embedding_type == 'sum':
dec_emb = emb + lang_emb.unsqueeze(1)
elif self.language_embedding_type == 'concat':
# replace the bos embedding with the language
bos_emb = lang_emb.expand_as(emb[0])
emb[0] = bos_emb
lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
dec_emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
else:
dec_emb = emb
if context is not None:
if self.encoder_type == "audio":
if src.data.dim() == 3:
if self.encoder_cnn_downsampling:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
elif self.encoder_cnn_downsampling:
long_mask = src.eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
# if input_.size(0) > 1 and input_.size(1) > 1:
#
# lengths = input.gt(onmt.constants.PAD).sum(-1)
#
# dec_in = pack_padded_sequence(dec_emb, lengths, batch_first=True, enforce_sorted=False)
#
# dec_out, hidden = self.lstm(dec_in, hid_cell)
# dec_out = pad_packed_sequence(dec_out, batch_first=True)[0]
# else:
if self.multilingual_factorized_weights:
dec_out, hid_cell = self.lstm(dec_emb, hid_cell, indices=tgt_lang)
else:
dec_out, hid_cell = self.lstm(dec_emb, hid_cell)
decoder_state.update_lstm_buffer(hid_cell)
lt = input_.size(1)
attn_mask = mask_src.expand(-1, lt, -1) if not self.fast_xattention else mask_src.squeeze(1)
# dec_out = self.postprocess_layer(dec_out)
dec_out = self.preprocess_attn(dec_out)
dec_out = dec_out.transpose(0, 1)
if buffering:
buffer = attn_buffer[0]
if buffer is None:
buffer = dict()
if self.multilingual_factorized_weights:
output, coverage = self.multihead_tgt(dec_out, context, context, tgt_lang, tgt_lang, attn_mask,
incremental=True, incremental_cache=buffer)
else:
output, coverage = self.multihead_tgt(dec_out, context, context, attn_mask,
incremental=True, incremental_cache=buffer)
decoder_state.update_attention_buffer(buffer, 0)
else:
if self.multilingual_factorized_weights:
output, coverage = self.multihead_tgt(dec_out, context, context, tgt_lang, tgt_lang, attn_mask)
else:
output, coverage = self.multihead_tgt(dec_out, context, context, attn_mask)
output = (output + dec_out)
output = self.postprocess_layer(output)
output_dict = defaultdict(lambda: None, {'hidden': output, 'coverage': coverage, 'context': context})
return output_dict
def forward(self, dec_seq, enc_out, src, tgt_lang=None, hid=None, **kwargs):
emb = embedded_dropout(self.word_lut, dec_seq, dropout=self.word_dropout if self.training else 0)
emb = emb * math.sqrt(self.model_size)
if self.use_language_embedding:
# print("Using language embedding")
lang_emb = self.language_embeddings(tgt_lang) # B x H or 1 x H
if self.language_embedding_type == 'sum':
dec_emb = emb + lang_emb.unsqueeze(1)
elif self.language_embedding_type == 'concat':
# replace the bos embedding with the language
bos_emb = lang_emb.expand_as(emb[0])
emb[0] = bos_emb
lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
dec_emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
else:
dec_emb = emb
if enc_out is not None:
if self.encoder_type == "audio":
if not self.encoder_cnn_downsampling:
mask_src = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
else:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
mask_src = long_mask[:, 0: enc_out.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.data.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
# if dec_seq.size(0) > 1 and dec_seq.size(1) > 1:
# lengths = dec_seq.gt(onmt.constants.PAD).sum(-1)
# dec_in = pack_padded_sequence(dec_emb, lengths, batch_first=True, enforce_sorted=False)
# dec_out, hid = self.lstm(dec_in, hid)
# dec_out = pad_packed_sequence(dec_out, batch_first=True)[0]
# else:
if self.multilingual_factorized_weights:
dec_out, hid = self.lstm(dec_emb, hid, indices=tgt_lang)
else:
dec_out, hid = self.lstm(dec_emb, hid)
lt = dec_seq.size(1)
attn_mask = mask_src.expand(-1, lt, -1) if not self.fast_xattention else mask_src.squeeze(1)
# dec_out = self.postprocess_layer(dec_out)
dec_out = self.preprocess_attn(dec_out)
dec_out = dec_out.transpose(0, 1).contiguous()
enc_out = enc_out.contiguous()
if self.multilingual_factorized_weights:
output, coverage = self.multihead_tgt(dec_out, enc_out, enc_out, tgt_lang, tgt_lang, attn_mask)
else:
output, coverage = self.multihead_tgt(dec_out, enc_out, enc_out, attn_mask)
output = (output + dec_out)
output = self.postprocess_layer(output)
output_dict = defaultdict(lambda: None, {'hidden': output, 'coverage': coverage, 'context': enc_out})
return output_dict
class SpeechLSTMSeq2Seq(NMTModel):
    """End-to-end speech seq2seq model: encoder + SpeechLSTMDecoder + generator.

    Optionally adds a CTC head (`ctc_linear`) over the encoder states.
    """
    def __init__(self, encoder, decoder, generator=None, rec_decoder=None, rec_generator=None,
                 mirror=False, ctc=False):
        super().__init__(encoder, decoder, generator, rec_decoder, rec_generator, ctc=ctc)
        self.model_size = self.decoder.model_size
        self.tgt_vocab_size = self.decoder.word_lut.weight.size(0)
        if self.encoder.input_type == 'text':
            self.src_vocab_size = self.encoder.word_lut.weight.size(0)
        else:
            # audio input: no source vocabulary
            self.src_vocab_size = 0
        if self.ctc:
            # projects encoder states to vocab logits for the CTC loss
            self.ctc_linear = nn.Linear(encoder.model_size, self.tgt_vocab_size)
    def reset_states(self):
        """No persistent state to reset for this model (no-op)."""
        return
    def forward(self, batch, target_mask=None, streaming=False, zero_encoder=False,
                mirror=False, streaming_state=None, nce=False, pretrained_layer_states=None):
        """Full training forward pass over a batch.

        :param batch: project Batch object (provides source/target tensors)
        :param zero_encoder: if True, zero out the encoder context (ablation)
        :param nce: if True during training, the generator returns NCE outputs
            instead of full log-probabilities
        :return: defaultdict with 'hidden', 'context', 'logprobs', masks, etc.
        """
        src = batch.get('source')
        tgt = batch.get('target_input')
        src_pos = batch.get('source_pos')
        tgt_pos = batch.get('target_pos')
        src_lang = batch.get('source_lang')
        tgt_lang = batch.get('target_lang')
        src_lengths = batch.src_lengths
        tgt_lengths = batch.tgt_lengths
        org_src = src
        org_tgt = tgt
        src = src.transpose(0, 1)  # transpose to have batch first
        tgt = tgt.transpose(0, 1)
        encoder_output = self.encoder(src, input_pos=src_pos, input_lang=src_lang, src_lengths=src_lengths,
                                      pretrained_layer_states=pretrained_layer_states)
        encoder_output = defaultdict(lambda: None, encoder_output)
        context = encoder_output['context']
        if zero_encoder:
            context.zero_()
        src_mask = encoder_output['src_mask']
        decoder_output = self.decoder(tgt, context, src,
                                      tgt_lang=tgt_lang, input_pos=tgt_pos, streaming=streaming,
                                      src_lengths=src_lengths, tgt_lengths=tgt_lengths,
                                      streaming_state=streaming_state)
        decoder_output = defaultdict(lambda: None, decoder_output)
        output = decoder_output['hidden']
        output_dict = defaultdict(lambda: None, decoder_output)
        output_dict['hidden'] = output
        output_dict['context'] = context
        output_dict['src_mask'] = encoder_output['src_mask']
        output_dict['src'] = src
        output_dict['target_mask'] = target_mask
        output_dict['reconstruct'] = None
        output_dict['target'] = batch.get('target_output')
        if self.training and nce:
            output_dict = self.generator[0](output_dict)
        else:
            logprobs = self.generator[0](output_dict)['logits']
            output_dict['logprobs'] = logprobs
        if self.ctc:
            # per-frame vocab logits over the encoder context for CTC
            output_dict['encoder_logits'] = self.ctc_linear(output_dict['context'])
        return output_dict
    def step(self, input_t, decoder_state):
        """One decoding step during search; returns per-token log-probs."""
        output_dict = self.decoder.step(input_t, decoder_state)
        output_dict['src'] = decoder_state.src.transpose(0, 1)
        # squeeze to remove the time step dimension
        log_prob = self.generator[0](output_dict)['logits'].squeeze(0)
        log_prob = F.log_softmax(log_prob, dim=-1, dtype=torch.float32)
        coverage = output_dict['coverage']
        # keep only the attention of the newest step
        last_coverage = coverage[:, -1, :].squeeze(1)
        output_dict['log_prob'] = log_prob
        output_dict['coverage'] = last_coverage
        return output_dict
    def create_decoder_state(self, batch, beam_size=1, type=1, buffering=True, **kwargs):
        """
        Generate a new decoder state based on the batch input
        :param buffering:
        :param streaming:
        :param type:
        :param batch: Batch object (may not contain target during decoding)
        :param beam_size: Size of beam used in beam search
        :return:
        """
        src = batch.get('source')
        src_pos = batch.get('source_pos')
        src_lang = batch.get('source_lang')
        tgt_lang = batch.get('target_lang')
        src_lengths = batch.src_lengths
        # TxB -> BxT
        src_transposed = src.transpose(0, 1)
        encoder_output = self.encoder(src_transposed, input_pos=src_pos, input_lang=src_lang, src_lengths=src_lengths)
        decoder_state = LSTMDecodingState(src, tgt_lang, encoder_output['context'],
                                          beam_size=beam_size, model_size=self.model_size,
                                          type=type, buffering=buffering)
        return decoder_state
    def decode(self, batch):
        """Force-decode the gold target and return its log-likelihood.

        :return: (gold_words, gold_scores, allgold_scores) — token count,
            per-sentence summed scores, and per-step score list
        """
        src = batch.get('source')
        src_pos = batch.get('source_pos')
        tgt_input = batch.get('target_input')
        tgt_output = batch.get('target_output')
        tgt_pos = batch.get('target_pos')
        # tgt_atb = batch.get('target_atb')  # a dictionary of attributes
        src_lang = batch.get('source_lang')
        tgt_lang = batch.get('target_lang')
        src_lengths = batch.src_lengths
        src = src.transpose(0, 1)
        tgt_input = tgt_input.transpose(0, 1)
        batch_size = tgt_input.size(0)
        context = self.encoder(src, input_pos=src_pos, input_lang=src_lang, src_lengths=src_lengths)['context']
        gold_scores = context.new(batch_size).zero_()
        gold_words = 0
        allgold_scores = list()
        decoder_output = self.decoder(tgt_input, context, src, tgt_lang=tgt_lang, src_lang=src_lang,
                                      input_pos=tgt_pos)['hidden']
        output = decoder_output
        # iterate over time steps; score the gold token at each step
        for dec_t, tgt_t in zip(output, tgt_output):
            dec_out = defaultdict(lambda: None)
            dec_out['hidden'] = dec_t.unsqueeze(0)
            dec_out['src'] = src
            dec_out['context'] = context
            if isinstance(self.generator, nn.ModuleList):
                gen_t = self.generator[0](dec_out)['logits']
            else:
                gen_t = self.generator(dec_out)['logits']
            gen_t = F.log_softmax(gen_t, dim=-1, dtype=torch.float32)
            gen_t = gen_t.squeeze(0)
            tgt_t = tgt_t.unsqueeze(1)
            scores = gen_t.gather(1, tgt_t)
            # padded positions contribute zero score
            scores.masked_fill_(tgt_t.eq(onmt.constants.PAD), 0)
            gold_scores += scores.squeeze(1).type_as(gold_scores)
            gold_words += tgt_t.ne(onmt.constants.PAD).sum().item()
            allgold_scores.append(scores.squeeze(1).type_as(gold_scores))
        return gold_words, gold_scores, allgold_scores
class LSTMDecodingState(DecoderState):
    """Mutable search state for SpeechLSTMDecoder beam decoding.

    Holds the beam-expanded source, encoder context, the decoder LSTM
    (hidden, cell) pair and the per-layer attention caches, and reorders /
    prunes them as the beam search advances.
    """

    def __init__(self, src, tgt_lang, context, beam_size=1, model_size=512, type=2,
                 cloning=True, buffering=False):
        """
        :param src: source tensor, T x B (text) or T x B x C (audio features)
        :param tgt_lang: target-language indices
        :param context: encoder output (time-first), may be None
        :param beam_size: beam width; every batch entry is replicated this many times
        :param model_size: decoder hidden size
        :param type: 1 = legacy layout (input sequence re-concatenated each step),
            2 = incremental-reordering layout
        :param cloning: for type 2, replicate src/context per beam up front
        :param buffering: whether the decoder caches attention projections
        """
        self.beam_size = beam_size
        self.model_size = model_size
        # LSTM recurrent state, filled in by update_lstm_buffer after step 1
        self.lstm_buffer = dict()
        self.lstm_buffer["hidden_state"] = None
        self.lstm_buffer["cell_state"] = None
        self.buffering = buffering
        self.attention_buffers = defaultdict(lambda: None)
        if type == 1:
            # if audio (3-D), keep only the first feature dim: src is only used for masking
            self.original_src = src  # TxBxC
            self.concat_input_seq = True
            if src is not None:
                if src.dim() == 3:
                    self.src = src.narrow(2, 0, 1).squeeze(2).repeat(1, beam_size)
                else:
                    self.src = src.repeat(1, beam_size)
            else:
                self.src = None
            if context is not None:
                self.context = context.repeat(1, beam_size, 1)
            else:
                self.context = None
            self.input_seq = None
            self.tgt_lang = tgt_lang
        elif type == 2:
            bsz = src.size(1)  # src is T x B
            # interleaved beam layout: [b0, b0, ..., b1, b1, ...]
            new_order = torch.arange(bsz).view(-1, 1).repeat(1, self.beam_size).view(-1)
            new_order = new_order.to(src.device)
            if cloning:
                self.src = src.index_select(1, new_order)  # dim 1 is the batch dim (src is time-first)
                if context is not None:
                    self.context = context.index_select(1, new_order)
                else:
                    self.context = None
            else:
                self.context = context
                self.src = src
            self.input_seq = None
            self.concat_input_seq = False
            self.tgt_lang = tgt_lang
        else:
            raise NotImplementedError

    def update_lstm_buffer(self, buffer):
        """Store the decoder LSTM's (hidden, cell) pair for the next step."""
        hid, cell = buffer
        # hid and cell should have size [n_layer, batch_size, hidden_size]
        self.lstm_buffer["hidden_state"] = hid
        self.lstm_buffer["cell_state"] = cell

    def update_attention_buffer(self, buffer, layer):
        """Store the cached attention projections for one decoder layer."""
        self.attention_buffers[layer] = buffer

    def update_beam(self, beam, b, remaining_sents, idx):
        """Reorder all state tensors in-place to follow beam `b`'s back-pointers."""
        if self.beam_size == 1:
            return
        for tensor in [self.src, self.input_seq]:
            if tensor is None:
                continue
            t_, br = tensor.size()
            sent_states = tensor.view(t_, self.beam_size, remaining_sents)[:, :, idx]
            sent_states.copy_(sent_states.index_select(1, beam[b].getCurrentOrigin()))
        for l in self.lstm_buffer:
            buffer_ = self.lstm_buffer[l]
            if buffer_ is None:
                # no LSTM step has run yet — nothing to reorder (mirrors the
                # None handling in prune_complete_beam / _reorder_incremental_state)
                continue
            t_, br_, d_ = buffer_.size()
            sent_states = buffer_.view(t_, self.beam_size, remaining_sents, d_)[:, :, idx, :]
            sent_states.data.copy_(sent_states.data.index_select(1, beam[b].getCurrentOrigin()))
        for l in self.attention_buffers:
            buffers = self.attention_buffers[l]
            if buffers is not None:
                for k in buffers.keys():
                    buffer_ = buffers[k]
                    t_, br_, d_ = buffer_.size()
                    sent_states = buffer_.view(t_, self.beam_size, remaining_sents, d_)[:, :, idx, :]
                    sent_states.data.copy_(sent_states.data.index_select(1, beam[b].getCurrentOrigin()))

    def prune_complete_beam(self, active_idx, remaining_sents):
        """Drop finished sentences, keeping only the `active_idx` columns."""

        def update_active_with_hidden(t):
            # for tensors with a trailing feature dimension (T x B*beam x H)
            if t is None:
                return t
            dim = t.size(-1)
            # select only the remaining active sentences
            view = t.data.view(-1, remaining_sents, dim)
            new_size = list(t.size())
            new_size[-2] = new_size[-2] * len(active_idx) // remaining_sents
            return view.index_select(1, active_idx).view(*new_size)

        def update_active_without_hidden(t):
            # for 2-D tensors (T x B*beam)
            if t is None:
                return t
            view = t.view(-1, remaining_sents)
            new_size = list(t.size())
            new_size[-1] = new_size[-1] * len(active_idx) // remaining_sents
            new_t = view.index_select(1, active_idx).view(*new_size)
            return new_t

        self.context = update_active_with_hidden(self.context)
        self.input_seq = update_active_without_hidden(self.input_seq)
        if self.src.dim() == 2:
            self.src = update_active_without_hidden(self.src)
        elif self.src.dim() == 3:
            # audio source keeps its feature dimension
            t = self.src
            dim = t.size(-1)
            view = t.view(-1, remaining_sents, dim)
            new_size = list(t.size())
            new_size[-2] = new_size[-2] * len(active_idx) // remaining_sents
            new_t = view.index_select(1, active_idx).view(*new_size)
            self.src = new_t
        for l in self.lstm_buffer:
            # update_active_with_hidden handles a still-None buffer
            buffer_ = self.lstm_buffer[l]
            buffer = update_active_with_hidden(buffer_)
            self.lstm_buffer[l] = buffer
        for l in self.attention_buffers:
            buffer_ = self.attention_buffers[l]
            if buffer_ is not None:
                for k in buffer_.keys():
                    buffer_[k] = update_active_with_hidden(buffer_[k])

    # For the new decoder version only
    def _reorder_incremental_state(self, reorder_state):
        """Fairseq-style reordering of all cached tensors along dim 1."""
        if self.context is not None:
            self.context = self.context.index_select(1, reorder_state)
        self.src = self.src.index_select(1, reorder_state)
        for l in self.attention_buffers:
            buffer_ = self.attention_buffers[l]
            if buffer_ is not None:
                for k in buffer_.keys():
                    buffer_[k] = buffer_[k].index_select(1, reorder_state)  # 1 for time first
        for k in self.lstm_buffer:
            buffer_ = self.lstm_buffer[k]
            if buffer_ is not None:
                self.lstm_buffer[k] = buffer_.index_select(1, reorder_state)  # 1 because the first dim is n_layer
| 30,801 | 39.002597 | 118 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/perceiver.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PositionalEncoding
from onmt.modules.pre_post_processing import PrePostProcessing
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState
from onmt.modules.sinusoidal_positional_encoding import SinusoidalPositionalEmbedding
import onmt
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from onmt.modules.dropout import embedded_dropout
from .relative_transformer_layers import RelativeTransformerEncoderLayer, RelativeTransformerDecoderLayer
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
import sys
from onmt.modules.checkpoint import checkpoint
# from torch.utils.checkpoint import checkpoint
from onmt.modules.identity import Identity
torch.set_printoptions(threshold=500000)
class SpeechTransformerEncoder(TransformerEncoder):
    """Transformer encoder specialized for speech input (relative positions,
    optional multilingual factorization / layer norm / linear projection)."""
    def __init__(self, opt, dicts, positional_encoder, encoder_type='text', language_embeddings=None):
        """
        :param opt: option namespace (layer count, heads, multilingual flags, ...)
        :param dicts: vocabularies (used by the parent for text input)
        :param positional_encoder: positional encoding module (may be replaced below)
        :param encoder_type: 'text' or 'audio'
        :param language_embeddings: optional language embedding table
        """
        # these attributes must exist before the parent constructor runs,
        # because it calls build_modules() which reads them
        self.death_rate = opt.death_rate
        self.learnable_position_encoding = opt.learnable_position_encoding
        self.layer_modules = list()
        self.asynchronous = opt.asynchronous
        self.max_memory_size = opt.max_memory_size
        self.extra_context_size = opt.extra_context_size
        self.experimental = opt.experimental
        self.unidirectional = opt.unidirectional
        self.reversible = opt.src_reversible
        self.n_heads = opt.n_heads
        self.fast_self_attn = opt.fast_self_attention
        self.checkpointing = opt.checkpointing
        self.mpw = opt.multilingual_partitioned_weights
        self.multilingual_linear_projection = opt.multilingual_linear_projection
        self.mln = opt.multilingual_layer_norm
        self.no_input_scale = opt.no_input_scale
        self.learnable_position_encoding = opt.learnable_position_encoding
        self.max_pos_length = opt.max_pos_length
        # TODO: multilingually linear transformation
        # build_modules will be called from the inherited constructor
        super().__init__(opt, dicts, positional_encoder, encoder_type, language_embeddings)
        # learnable position encoding
        if self.learnable_position_encoding:
            # raise NotImplementedError
            self.positional_encoder = None
        else:
            # or using pre-set sinusoidal
            self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
        self.d_head = self.model_size // self.n_heads
        if self.multilingual_linear_projection:
            # one model_size x model_size projection matrix per language
            self.linear_proj = nn.Parameter(torch.Tensor(opt.n_languages, self.model_size, self.model_size))
            std_ = math.sqrt(2.0 / (self.model_size + self.model_size))
            torch.nn.init.normal_(self.linear_proj, 0.0, std_)
        self.mln = opt.multilingual_layer_norm
        if not opt.rezero:
            # final layer norm (per-language when multilingual_layer_norm)
            self.postprocess_layer = PrePostProcessing(opt.model_size, opt.dropout, sequence='n', multilingual=self.mln,
                                                       n_languages=opt.n_languages)
        else:
            # ReZero training needs no final normalization
            self.postprocess_layer = Identity()
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/classifier.py | import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import defaultdict
class TransformerClassifier(nn.Module):
    """Sequence classifier: a Transformer encoder followed by a generator head.

    When `mpc` is enabled, training additionally performs masked predictive
    coding: a fraction of input frames is masked and reconstructed from the
    encoder states via a linear layer.
    """
    def __init__(self, encoder, generator=None, mpc=False, **kwargs):
        super().__init__()
        self.encoder = encoder
        self.generator = generator
        # number of output classes, read off the generator's linear layer
        self.num_classes = self.generator[0].linear.weight.size(0)
        self.mpc = mpc
        if mpc:
            # maps encoder states back to the input feature space
            self.mpc_linear = nn.Linear(self.encoder.opt.model_size, self.encoder.opt.input_size)
        has_src_vocab = self.encoder.input_type == 'text'
        self.src_vocab_size = self.encoder.word_lut.weight.size(0) if has_src_vocab else 0
    def forward(self, batch, *args, **kwargs):
        """Encode the batch and classify; returns a defaultdict of outputs."""
        if self.mpc and self.training:
            # mask inputs with p=20%
            batch.mask_mpc(p=0.2)
        src = batch.get('source').transpose(0, 1)  # batch-first for the encoder
        enc_state = self.encoder(src,
                                 input_pos=batch.get('source_pos'),
                                 input_lang=batch.get('source_lang'),
                                 src_lengths=batch.src_lengths)
        enc_state = defaultdict(lambda: None, enc_state)
        ctx = enc_state['context']
        # assemble the output dict consumed by the generator
        out = defaultdict(lambda: None)
        out['hidden'] = ctx
        out['context'] = ctx
        out['src_mask'] = enc_state['src_mask']
        out['src'] = src
        out['target_mask'] = enc_state['src_mask']
        out['logprobs'] = self.generator[0](out)['logits']
        if self.mpc:
            # masked predictive coding: reconstruction plus targets for the loss
            out['mpc'] = self.mpc_linear(ctx)
            out['masked_positions'] = batch.get('masked_positions')
            out['original_source'] = batch.get('original_source')
        return out
    def encode(self, batch):
        """Run the encoder only and return its per-layer hidden states."""
        src = batch.get('source').transpose(0, 1)  # batch-first
        enc_state = self.encoder(src,
                                 input_pos=batch.get('source_pos'),
                                 input_lang=batch.get('source_lang'),
                                 src_lengths=batch.src_lengths,
                                 return_states=True)
        return enc_state['layer_states']
| 2,848 | 30.655556 | 107 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/lid_loss.py | import math
import numpy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
import onmt
import onmt.modules
from onmt.utils import flip
class CrossEntropyLIDLoss(_Loss):
    """
    Label-smoothed cross-entropy over per-frame language-ID (LID) logits.

    Frames whose mask entry is 1/True (padding) are excluded from the loss.
    Smoothing is applied only in training mode.

    Args:
        output_size: number of classes (languages)
        label_smoothing: probability mass redistributed for smoothing
    """

    def __init__(self, output_size, label_smoothing):
        super().__init__()
        self.output_size = output_size
        self.padding_idx = -1
        self.smoothing_value = label_smoothing
        self.confidence = 1.0 - label_smoothing
        self.label_smoothing = label_smoothing
        # try apex's fused softmax-xentropy; fall back to pure PyTorch when
        # the extension (or onmt wrapper) is unavailable
        self.fast_xentropy = False
        try:
            import xentropy_cuda
            from onmt.modules.optimized.softmax_xentropy import SoftmaxCrossEntropyLoss
            self.softmax_xentropy = SoftmaxCrossEntropyLoss.apply
            self.fast_xentropy = True
        except (ModuleNotFoundError, AttributeError):
            self.softmax_xentropy = None
            self.fast_xentropy = False

    def forward(self, lid_logits, labels, mask):
        """
        :param lid_logits: [T x B x L] per-frame language logits
        :param labels: language ids — a single id [1], one per sentence [B],
            or one per frame [T x B]
        :param mask: [B x T] padding mask (1/True = padding, excluded)
        :return: scalar loss summed over all non-padded frames
        """
        len_t, bsz = lid_logits.size(0), lid_logits.size(1)
        # broadcast the labels to one id per (frame, sentence)
        if labels.ndim == 1 and labels.size(0) == 1:
            # one id shared by the whole batch
            labels = labels.unsqueeze(0).repeat(len_t, bsz)
        elif labels.ndim == 1 and labels.size(0) == bsz:
            # one id per sentence, tiled across time.
            # repeat() needs one factor per dim of the unsqueezed [1 x B]
            # tensor, so both factors must be given (repeat(len_t) raises).
            labels = labels.unsqueeze(0).repeat(len_t, 1)
        elif labels.ndim == 2:
            # two separate conditions — a single `assert a, b` would make the
            # second comparison the assertion *message*, never checked
            assert labels.size(0) == len_t and labels.size(1) == bsz
        else:
            raise NotImplementedError
        # mask comes in as [B x T]: align with the time-first logits
        mask = mask.transpose(0, 1)
        # flatten and drop padded positions
        logits = lid_logits.view(-1, lid_logits.size(-1))
        gtruth = labels.view(-1)
        padding_mask = mask.contiguous().long()
        non_pad_indices = torch.nonzero(padding_mask.view(-1).ne(1)).squeeze(1)
        logits = logits.index_select(0, non_pad_indices)
        gtruth = gtruth.index_select(0, non_pad_indices)
        # smoothing only at training time
        label_smoothing = self.label_smoothing if self.training else 0.0
        eps_i = self.smoothing_value if self.training else 0.0
        lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
        nll_loss = -lprobs.gather(1, gtruth.unsqueeze(1))
        smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
        nll_loss = nll_loss.sum()
        smooth_loss = smooth_loss.sum()
        loss = (1. - label_smoothing) * nll_loss + eps_i * smooth_loss
        return loss
| 3,563 | 32.308411 | 108 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/conformer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState
from onmt.modules.sinusoidal_positional_encoding import SinusoidalPositionalEmbedding
import onmt
from onmt.modules.base_seq2seq import NMTModel, DecoderState
from onmt.models.speech_recognizer.lstm import SpeechLSTMDecoder, LSTMDecodingState
from onmt.modules.convolution import Conv2dSubsampling
from onmt.models.transformer_layers import PrePostProcessing
from onmt.models.discourse.relative_transformer_layers import RelativeTransformerEncoderLayer, RelativeTransformerDecoderLayer
from .conformer_layers import ConformerEncoderLayer
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
import sys
class ConformerEncoder(TransformerEncoder):
    """Conformer encoder for speech: 2-layer strided CNN front-end (time and
    frequency downsampled by ~4x) followed by Conformer blocks with relative
    sinusoidal positions."""
    def __init__(self, opt, dicts, positional_encoder, encoder_type='text', language_embeddings=None):
        """
        :param opt: option namespace (model_size, input_size, layers, heads, ...)
        :param dicts: vocabularies (used by the parent for text input)
        :param positional_encoder: replaced below by a sinusoidal embedding
        :param encoder_type: 'text' or 'audio'
        :param language_embeddings: optional language embedding table
        """
        # must be set before the parent constructor runs (it calls build_modules)
        self.death_rate = opt.death_rate
        self.learnable_position_encoding = opt.learnable_position_encoding
        self.layer_modules = list()
        self.unidirectional = opt.unidirectional
        self.reversible = opt.src_reversible
        self.n_heads = opt.n_heads
        # build_modules will be called from the inherited constructor
        super().__init__(opt, dicts, positional_encoder, encoder_type, language_embeddings)
        # position encoding sin/cos
        self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
        # self.audio_trans = Conv2dSubsampling(opt.input_size, opt.model_size)
        channels = self.channels
        feature_size = opt.input_size
        # two stride-2 conv blocks: each halves both time and frequency
        cnn = [nn.Conv2d(channels, 32, kernel_size=(3, 3), stride=2), nn.ReLU(True), nn.BatchNorm2d(32),
               nn.Conv2d(32, 32, kernel_size=(3, 3), stride=2), nn.ReLU(True), nn.BatchNorm2d(32)]
        # cnn = [nn.Conv2d(channels, 32, kernel_size=(3, 3), stride=2), nn.ReLU(True),
        #        nn.Conv2d(32, 32, kernel_size=(3, 3), stride=2), nn.ReLU(True)]
        nn.init.kaiming_normal_(cnn[0].weight, nonlinearity="relu")
        nn.init.kaiming_normal_(cnn[3].weight, nonlinearity="relu")
        # frequency bins remaining after the two stride-2 convs, times 32 maps
        feat_size = (((feature_size // channels) - 3) // 4) * 32
        # cnn.append()
        self.audio_trans = nn.Sequential(*cnn)
        self.linear_trans = nn.Linear(feat_size, self.model_size)
        self.d_head = self.model_size // self.n_heads
    def build_modules(self):
        """Create the stack of Conformer layers with linearly increasing
        stochastic-depth death rates (called by the parent constructor)."""
        e_length = expected_length(self.layers, self.death_rate)
        print("* Conformer Encoder with %.2f expected layers" % e_length)
        if self.unidirectional:
            print("* Running a unidirectional Encoder.")
        self.layer_modules = nn.ModuleList()
        for _l in range(self.layers):
            # linearly decay the death rate
            death_r = (_l + 1.0) / self.layers * self.death_rate
            block = ConformerEncoderLayer(self.opt, death_rate=death_r)
            self.layer_modules.append(block)
    def forward(self, input, input_pos=None, input_lang=None, streaming=False, **kwargs):
        """
        :param input: [B x T x Input_Size]; feature index 0 is a pad marker,
            the remaining features are the acoustic frames
        :param input_pos: [B x T] positions
        :param input_lang: [B] language ids of each sample
        :param streaming: connect different segments in transformer-xl style
        :param kwargs:
        :return: dict with 'context' [T' x B x H], 'src_mask' and 'src'
        """
        # feature 0 encodes padding; strip it off before the CNN
        long_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
        input = input.narrow(2, 1, input.size(2) - 1)
        # first subsampling
        input = input.view(input.size(0), input.size(1), -1, self.channels)
        input = input.permute(0, 3, 1, 2)  # [bsz, channels, time, f]
        input = self.audio_trans(input)
        input = input.permute(0, 2, 1, 3).contiguous()
        input = input.view(input.size(0), input.size(1), -1)
        input = self.linear_trans(input)
        emb = input
        # subsample the pad mask by 4 to match the CNN-downsampled time axis
        mask_src = long_mask[:, 0:emb.size(1) * 4:4].transpose(0, 1).unsqueeze(0)
        dec_attn_mask = long_mask[:, 0:input.size(1) * 4:4].unsqueeze(1)
        emb = emb.transpose(0, 1)
        input = input.transpose(0, 1)
        mem_len = 0
        mems = None
        if self.unidirectional:
            # add a causal (upper-triangular) mask on top of the pad mask
            qlen = input.size(0)
            klen = qlen + mem_len
            attn_mask_src = torch.triu(
                emb.new_ones(qlen, klen), diagonal=1 + mem_len).byte()[:, :, None]
            pad_mask = mask_src
            mask_src = pad_mask + attn_mask_src
            # dec_attn_mask = dec_attn_mask + pad_mask.unsqueeze(0)
            mask_src = mask_src.gt(0)
        if onmt.constants.torch_version >= 1.2:
            mask_src = mask_src.bool()
        """ Scale the emb by sqrt(d_model) """
        # emb = emb * math.sqrt(self.model_size)
        """ Adding positional encoding """
        qlen = input.size(0)
        klen = qlen + mem_len
        # Asynchronous positions: 2K+1 positions instead of K+1
        if self.unidirectional:
            pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
        else:
            pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)
        # pos_emb has size 2T+1 x 1 x H
        pos_emb = self.positional_encoder(pos, bsz=input.size(1))
        if self.learnable_position_encoding:
            raise NotImplementedError
        context = emb
        # Apply dropout to pos_emb
        # context = self.preprocess_layer(context)
        pos_emb = self.preprocess_layer(pos_emb)
        for i, layer in enumerate(self.layer_modules):
            # src_len x batch_size x d_model
            context = layer(context, pos_emb, mask_src, src_lang=input_lang)
        # final layer norm
        context = self.postprocess_layer(context)
        output_dict = defaultdict(lambda: None, {'context': context, 'src_mask': dec_attn_mask, 'src': input})
        return output_dict
class Conformer(NMTModel):
    """Speech-to-text model pairing a Conformer-style acoustic encoder with a
    Transformer decoder, with an optional CTC head over the encoder states.

    The constructor signature mirrors NMTModel for API compatibility; the
    reconstruction decoder/generator arguments are forwarded to the base class.
    """

    def __init__(self, encoder, decoder, generator=None, rec_decoder=None, rec_generator=None,
                 mirror=False, ctc=False):
        super().__init__(encoder, decoder, generator, rec_decoder, rec_generator, ctc=ctc)
        self.model_size = self.decoder.model_size
        self.tgt_vocab_size = self.decoder.word_lut.weight.size(0)

        # a source vocabulary only exists for text encoders; audio encoders have none
        if self.encoder.input_type == 'text':
            self.src_vocab_size = self.encoder.word_lut.weight.size(0)
        else:
            self.src_vocab_size = 0

        if self.ctc:
            # projects encoder states onto the target vocabulary for the CTC loss
            self.ctc_linear = nn.Linear(encoder.model_size, self.tgt_vocab_size)

    def reset_states(self):
        """No persistent (streaming) state to reset for this model."""
        return

    def forward(self, batch, target_mask=None, streaming=False, zero_encoder=False,
                mirror=False, streaming_state=None, nce=False):
        """Run a full encoder/decoder pass over a training batch.

        :param batch: Batch object providing time-first source/target tensors
        :param target_mask: mask over target positions, forwarded to the output dict
        :param streaming: forwarded to the decoder
        :param zero_encoder: if True, zero out the encoder context (ablation)
        :param mirror: accepted for API compatibility; unused here
        :param streaming_state: forwarded to the decoder
        :param nce: during training, let the generator produce NCE output instead
            of plain logits
        :return: defaultdict with hidden states, context, masks and log-probs
        """
        src = batch.get('source')
        tgt = batch.get('target_input')
        src_pos = batch.get('source_pos')
        tgt_pos = batch.get('target_pos')
        src_lang = batch.get('source_lang')
        tgt_lang = batch.get('target_lang')
        src_lengths = batch.src_lengths
        tgt_lengths = batch.tgt_lengths

        org_src = src
        org_tgt = tgt
        src = src.transpose(0, 1)  # transpose to have batch first
        tgt = tgt.transpose(0, 1)

        encoder_output = self.encoder(src, input_lang=src_lang)
        encoder_output = defaultdict(lambda: None, encoder_output)
        context = encoder_output['context']

        if zero_encoder:
            context.zero_()

        src_mask = encoder_output['src_mask']

        decoder_output = self.decoder(tgt, context, src,
                                      tgt_lang=tgt_lang, input_pos=tgt_pos, streaming=streaming,
                                      src_lengths=src_lengths, tgt_lengths=tgt_lengths,
                                      streaming_state=streaming_state)
        decoder_output = defaultdict(lambda: None, decoder_output)
        output = decoder_output['hidden']

        output_dict = defaultdict(lambda: None, decoder_output)
        output_dict['hidden'] = output
        output_dict['context'] = context
        output_dict['src_mask'] = encoder_output['src_mask']
        output_dict['src'] = src
        output_dict['target_mask'] = target_mask
        output_dict['reconstruct'] = None
        output_dict['target'] = batch.get('target_output')

        if self.training and nce:
            # the NCE generator consumes and returns the whole dict
            output_dict = self.generator[0](output_dict)
        else:
            logprobs = self.generator[0](output_dict)['logits']
            output_dict['logprobs'] = logprobs

        # compute the logits for each encoder step
        if self.ctc:
            output_dict['encoder_logits'] = self.ctc_linear(output_dict['context'])

        return output_dict

    def step(self, input_t, decoder_state):
        """Advance decoding by one step.

        :param input_t: the input word index at time t
        :param decoder_state: DecoderState holding the decoding buffers
        :return: output dict with 'log_prob' and last-step 'coverage'
        """
        output_dict = self.decoder.step(input_t, decoder_state)
        output_dict['src'] = decoder_state.src.transpose(0, 1)

        # squeeze to remove the time step dimension
        log_prob = self.generator[0](output_dict)['logits'].squeeze(0)
        log_prob = F.log_softmax(log_prob, dim=-1, dtype=torch.float32)

        coverage = output_dict['coverage']
        # keep only the coverage of the most recent decoding step
        last_coverage = coverage[:, -1, :].squeeze(1)

        output_dict['log_prob'] = log_prob
        output_dict['coverage'] = last_coverage

        return output_dict

    def create_decoder_state(self, batch, beam_size=1, type=1, buffering=True, **kwargs):
        """
        Generate a new decoder state based on the batch input
        :param buffering:
        :param type:
        :param batch: Batch object (may not contain target during decoding)
        :param beam_size: Size of beam used in beam search
        :return: an LSTMDecodingState wrapping the encoder context
        """
        src = batch.get('source')
        src_pos = batch.get('source_pos')
        src_lang = batch.get('source_lang')
        tgt_lang = batch.get('target_lang')

        # TxB -> BxT
        src_transposed = src.transpose(0, 1)
        encoder_output = self.encoder(src_transposed, input_lang=src_lang)

        decoder_state = LSTMDecodingState(src, tgt_lang, encoder_output['context'],
                                          beam_size=beam_size, model_size=self.model_size,
                                          type=type, buffering=buffering)

        return decoder_state

    def decode(self, batch):
        """Force-decode the gold target of a batch and score it.

        :param batch: Batch object with source and gold target
        :return: (gold word count, per-sentence gold scores, per-step score list)
        """
        src = batch.get('source')
        src_pos = batch.get('source_pos')
        tgt_input = batch.get('target_input')
        tgt_output = batch.get('target_output')
        tgt_pos = batch.get('target_pos')
        src_lang = batch.get('source_lang')
        tgt_lang = batch.get('target_lang')

        src = src.transpose(0, 1)
        tgt_input = tgt_input.transpose(0, 1)
        batch_size = tgt_input.size(0)

        context = self.encoder(src)['context']

        gold_scores = context.new(batch_size).zero_()
        gold_words = 0
        allgold_scores = list()

        decoder_output = self.decoder(tgt_input, context, src, tgt_lang=tgt_lang, src_lang=src_lang,
                                      input_pos=tgt_pos)['hidden']
        output = decoder_output

        # walk the time dimension and accumulate the gold log-probability
        for dec_t, tgt_t in zip(output, tgt_output):
            dec_out = defaultdict(lambda: None)
            dec_out['hidden'] = dec_t.unsqueeze(0)
            dec_out['src'] = src
            dec_out['context'] = context

            if isinstance(self.generator, nn.ModuleList):
                gen_t = self.generator[0](dec_out)['logits']
            else:
                gen_t = self.generator(dec_out)['logits']
            gen_t = F.log_softmax(gen_t, dim=-1, dtype=torch.float32)
            gen_t = gen_t.squeeze(0)
            tgt_t = tgt_t.unsqueeze(1)
            scores = gen_t.gather(1, tgt_t)
            # padded gold positions contribute nothing
            scores.masked_fill_(tgt_t.eq(onmt.constants.PAD), 0)
            gold_scores += scores.squeeze(1).type_as(gold_scores)
            gold_words += tgt_t.ne(onmt.constants.PAD).sum().item()
            allgold_scores.append(scores.squeeze(1).type_as(gold_scores))

        return gold_words, gold_scores, allgold_scores
import os
import copy
import math
import os
from collections import defaultdict
from typing import List, Optional, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from omegaconf import DictConfig, open_dict, OmegaConf

import onmt
from onmt.models.transformers import Transformer, TransformerDecodingState
from onmt.modules.optimized.linear import Linear

from .fairseq_wav2vec2.file_io import PathManager
from .fairseq_wav2vec2.utils import overwrite_args_by_name
#
# # maybe just need d / F.normalize(d, p=2, dim=2)
#
# def norm_vec_sentence_level(d, xp):
# # d : (max_len, batchsize, emb_dim)
# # trans_d : (batchsize, max_len, emb_dim)
# trans_d = xp.transpose(d, (1, 0, 2))
# norm_term = xp.linalg.norm(trans_d, axis=(1, 2), keepdims=True) + 1e-12
# trans_d = trans_d / norm_term
# d_sent_norm = xp.transpose(trans_d, (1, 0, 2))
# return d_sent_norm
def load_checkpoint_to_cpu(path, arg_overrides=None, load_on_all_ranks=False):
    """Loads a checkpoint to CPU (with upgrading for backward compatibility).

    If doing single-GPU training or if the checkpoint is only being loaded by at
    most one process on each node (current default behavior is for only rank 0
    to read the checkpoint from disk), load_on_all_ranks should be False to
    avoid errors from torch.distributed not having been initialized or
    torch.distributed.barrier() hanging.

    If all processes on each node may be loading the checkpoint
    simultaneously, load_on_all_ranks should be set to True to avoid I/O
    conflicts.

    There's currently no support for > 1 but < all processes loading the
    checkpoint on each node.

    :param path: path (possibly remote, resolved through PathManager) to the
        checkpoint file
    :param arg_overrides: optional dict of {option name: value} applied on top
        of the stored training args/config before returning
    :param load_on_all_ranks: see above; gates a distributed barrier before the
        local copy is re-fetched
    :return: the checkpoint state dict, with 'args'/'cfg' overridden in place
    """
    local_path = PathManager.get_local_path(path)
    # The locally cached file returned by get_local_path() may be stale for
    # remote files that are periodically updated/overwritten (ex:
    # checkpoint_last.pt) - so we remove the local copy, sync across processes
    # (if needed), and then download a fresh copy.
    if local_path != path and PathManager.path_requires_pathmanager(path):
        try:
            os.remove(local_path)
        except FileNotFoundError:
            # With potentially multiple processes removing the same file, the
            # file being missing is benign (missing_ok isn't available until
            # Python 3.8).
            pass
        if load_on_all_ranks:
            torch.distributed.barrier()
        local_path = PathManager.get_local_path(path)

    with open(local_path, "rb") as f:
        state = torch.load(f, map_location=torch.device("cpu"))

    # legacy checkpoints store a Namespace under 'args'; apply overrides there
    if "args" in state and state["args"] is not None and arg_overrides is not None:
        args = state["args"]
        for arg_name, arg_val in arg_overrides.items():
            setattr(args, arg_name, arg_val)

    # newer checkpoints store an (omegaconf) config under 'cfg'
    if "cfg" in state and state["cfg"] is not None:
        # hack to be able to set Namespace in dict config. this should be removed when we update to newer
        # omegaconf version that supports object flags, or when we migrate all existing models
        state["cfg"] = OmegaConf.create(state["cfg"])

        OmegaConf.set_struct(state["cfg"], True)

        if arg_overrides is not None:
            overwrite_args_by_name(state["cfg"], arg_overrides)

    # state = _upgrade_state_dict(state)
    return state
# defining a Wav2vec2 encoder wrapping the HuggingFace model here
class FairseqWav2VecExtractor(nn.Module):
    """Exposes only the convolutional feature extractor of a pretrained
    fairseq wav2vec 2.0 model.

    The checkpoint is loaded from disk, the pretraining-only modules are
    stripped, and ``forward`` returns the conv features plus a padding mask.
    """

    def __init__(self, model_path="wav2vec_vox_new.pt"):
        """
        :param model_path: path to the fairseq wav2vec 2.0 checkpoint file
        """
        # NOTE: an unused ``import fairseq`` was removed here -- only the
        # bundled .fairseq_wav2vec2 port is actually used, so the external
        # fairseq package is not a dependency of this class.
        from .fairseq_wav2vec2.wav2vec2 import Wav2Vec2Model
        super().__init__()
        self.model_path = model_path
        state = load_checkpoint_to_cpu(model_path)
        self.cfg = state['cfg']['model']
        self.wav2vec_encoder = Wav2Vec2Model(cfg=self.cfg)
        self.wav2vec_encoder.load_state_dict(state['model'])
        # drop quantizer/projection heads that are only needed for pretraining
        self.wav2vec_encoder.remove_pretraining_modules()

    def forward(self, batch, **kwargs):
        """
        :param batch_first_output: [bsz, seq_len, hidden_size] as output size, else transpose(0, 1)
        :param input: torch.Tensor [batch_size, sequence_length, 2]
        :param kwargs:
        :return: (conv features, padding mask) from the wav2vec feature extractor
        """
        input = batch.get('source').transpose(0, 1)  # T x B x H -> B x T x H

        # 0 for tokens that are not masked, 1 for tokens that are masked
        # (channel 0 of the input carries the padding indicator -- TODO confirm
        # the exact polarity against the dataset builder)
        long_mask = input.narrow(2, 0, 1).squeeze(2).eq(0).long()
        input = input.narrow(2, 1, input.size(2) - 1).squeeze(-1)

        attn_mask = long_mask
        features, padding_mask = self.wav2vec_encoder.extract_conv_features(input, attn_mask)

        return features, padding_mask
class FairseqWav2VecQuantizer(nn.Module):
    """Loads a pretrained fairseq wav2vec 2.0 checkpoint and exposes only its
    quantizer: the forward pass maps a batch of waveforms to the discrete
    codebook targets produced by the model, together with the padding mask.
    """

    def __init__(self, model_path="wav2vec_vox_new.pt"):
        """
        :param model_path: path to the fairseq wav2vec 2.0 checkpoint file
        """
        from .fairseq_wav2vec2.wav2vec2 import Wav2Vec2Model
        super().__init__()
        self.model_path = model_path
        checkpoint = load_checkpoint_to_cpu(model_path)
        self.cfg = checkpoint['cfg']['model']
        self.wav2vec_encoder = Wav2Vec2Model(cfg=self.cfg)
        self.wav2vec_encoder.load_state_dict(checkpoint['model'])

    def forward(self, batch, **kwargs):
        """Quantize a batch of audio.

        :param batch: Batch whose 'source' entry is [seq_len, bsz, feat + 1];
            channel 0 carries the padding indicator, the remainder the signal
        :param kwargs: unused
        :return: (codebook targets, padding mask)
        """
        source = batch.get('source').transpose(0, 1)  # T x B x H -> B x T x H

        # 0 for tokens that are not masked, 1 for tokens that are masked
        pad_indicator = source[:, :, 0].eq(0).long()
        signal = source[:, :, 1:].squeeze(-1)

        quantizer_output = self.wav2vec_encoder(signal, pad_indicator, mask=False,
                                                quantize=True, quantize_only=True,
                                                )

        return quantizer_output['quantized_target'], quantizer_output['padding_mask']
class FairseqWav2Vec(nn.Module):
    """Speech encoder wrapping a pretrained fairseq wav2vec 2.0 model.

    Loads the checkpoint at ``model_path``, overrides a few dropout/masking
    options from ``opt``, and can optionally augment the encoder with FAVOR+
    attention, relative or rotary attention, factorized multilingual weights,
    adapters, or a stacked text encoder. The convolutional feature extractor
    is always frozen.
    """

    def __init__(self, opt, model_path="wav2vec_vox_new.pt",
                 stacked_encoder=None, **kwargs):
        """
        :param opt: global options namespace (dropout rates, freezing flags,
            factorization/adapter settings, ...)
        :param model_path: path to the fairseq wav2vec 2.0 checkpoint
        :param stacked_encoder: optional extra encoder stacked on top of wav2vec
        """
        super().__init__()
        # do we need opt for this?
        self.opt = opt
        self.model_path = model_path
        from .fairseq_wav2vec2.wav2vec2 import Wav2Vec2Model

        state = load_checkpoint_to_cpu(model_path)
        self.cfg = state['cfg']['model']

        # don't override the options for wav2vec yet (some of them can create NaN)
        self.cfg.dropout = self.opt.enc_pretrain_emb_dropout
        self.cfg.attention_dropout = self.opt.enc_pretrain_hidden_dropout
        self.cfg.encoder_layerdrop = self.opt.death_rate
        # channel masking (SpecAugment-style) only when spec augment is enabled
        self.cfg.mask_channel_prob = 0.2 if self.opt.wav2vec_spec_augment else 0.0
        self.cfg.mask_channel_length = 64
        self.cfg.mask_prob = 0.0

        self.wav2vec_encoder = Wav2Vec2Model(cfg=self.cfg, favor=opt.favor_attention,
                                             weight_drop=opt.weight_drop,
                                             predict_language=opt.predict_language,
                                             n_languages=opt.n_languages)

        self.favor = opt.favor_attention
        if self.favor:
            from onmt.modules.performer import ProjectionUpdater
            self.proj_updater = ProjectionUpdater(self.wav2vec_encoder.encoder,
                                                  feature_redraw_interval=1000)
            self.auto_check_redraw = True

        # load wav2vec weights
        wav2vec_weights = state['model']
        existed_weights = self.wav2vec_encoder.state_dict()

        # if we add new weights/buffers to new model then put them into the state_dict
        keys = existed_weights.keys()
        for key in keys:
            if key not in wav2vec_weights:
                wav2vec_weights[key] = existed_weights[key]

        self.wav2vec_encoder.load_state_dict(state['model'])
        removing_quantizer = not opt.wav2vec2_quantize
        # remove the quantization modules
        self.wav2vec_encoder.remove_pretraining_modules(removing_quantizer=removing_quantizer)

        cfg = self.wav2vec_encoder.cfg
        assert self.opt.model_size == cfg.encoder_embed_dim, \
            "Expect self.opt.model_size (%d) and cfg.encoder_embed_dim (%d) to equal " \
            % (self.opt.model_size, cfg.encoder_embed_dim)
        self.input_type = self.opt.encoder_type
        self.model_size = cfg.encoder_embed_dim
        # no gradient flows through the conv feature extractor
        self.wav2vec_encoder.feature_grad_mult = 0.0
        self.time = None
        self.quantize = opt.wav2vec2_quantize
        self.dual_output = opt.wav2vec2_dual_output and self.quantize

        if stacked_encoder is not None:
            self.wav2vec_encoder.add_stacked_encoder(stacked_encoder)

        # freezing the parameters of the Convolutional feature extractors (by default)
        for param in self.wav2vec_encoder.feature_extractor.parameters():
            param.requires_grad = False

        # add relative attention
        if (hasattr(opt, 'wav2vec2_relative_attention') and opt.wav2vec2_relative_attention) or \
                (hasattr(opt, 'add_relative_attention') and opt.add_relative_attention):
            print("[INFO] Add relative attention for wav2vec")
            self.wav2vec_encoder.add_relative_attention()

        self.rotary_position_encoding = opt.rotary_position_encoding
        if self.rotary_position_encoding:
            # rotary and relative attention are mutually exclusive
            assert not (hasattr(opt, 'wav2vec2_relative_attention') and opt.wav2vec2_relative_attention)
            self.wav2vec_encoder.add_rotary_attention()

        # freeze the whole encoder. needs to do this first before adding customized parameters
        if opt.freeze_encoder:
            print("[INFO] Freezing encoder parameters")
            for p in self.wav2vec_encoder.parameters():
                p.requires_grad = False

        if opt.freeze_encoder_ffn:
            self.freeze_ffn_params()

        # then add factorize
        if opt.multilingual_factorized_weights:
            print("[INFO] Factorizing Wav2vec model into %d languages and %d factors"
                  % (opt.n_languages, opt.n_attributes))
            self.wav2vec_encoder.encoder.add_factorize(opt.n_languages, rank=opt.mfw_rank,
                                                       multiplicative=opt.mfw_multiplicative,
                                                       fast=opt.fast_factorize)

        # or adapter
        if opt.wav2vec_adapter > 0:
            print("[INFO] Adding adapters for Wav2vec model with %d languages" % opt.n_languages)
            self.wav2vec_encoder.encoder.add_adapters(opt.n_languages, adapter_location=opt.wav2vec_adapter)
        # (a large block of commented-out code for a stacked encoder with conv
        # downsampling, and for a discrete encoder over quantized codes, lived
        # here; see repository history)
        else:
            # NOTE(review): this ``else`` pairs with the adapter ``if`` above,
            # so when opt.wav2vec_adapter > 0 neither self.stacked_encoder nor
            # self.conv_downsampler is defined -- any later access would raise
            # AttributeError. Verify whether that is intended.
            self.stacked_encoder = None
            self.conv_downsampler = None

    def fix_projection_matrices_(self):
        """Permanently fix the FAVOR+ random projections (no further redraws)."""
        if self.favor:
            self.proj_updater.fix_projections_()

    def convert_fast_attention(self):
        """Delegate conversion to the fast attention implementation to the wrapped encoder."""
        self.wav2vec_encoder.convert_fast_attention()

    def freeze_ffn_params(self):
        """Freeze the feed-forward sublayers (fc1/fc2) of every encoder layer."""
        for layer in self.wav2vec_encoder.encoder.layers:
            for p in layer.fc1.parameters():
                p.requires_grad = False
            for p in layer.fc2.parameters():
                p.requires_grad = False

    def test_run(self, input, mask):
        """Run the encoder without masking and return only the context tensor.

        :param input: [B x T x H]; H == 1 means raw audio samples, H > 1 means
            precomputed (TDNN) features
        :param mask: padding mask passed through to the encoder
        :return: the encoder context ('x' of the wav2vec output)
        """
        # input should have size [B x T x H]
        # H == 1: audio samples
        # H > 1: precomputed samples
        if input.size(-1) == 1:
            precomputed_tdnn = False
            input = input.squeeze(-1)
        else:
            precomputed_tdnn = True

        wav2vec_output = self.wav2vec_encoder.extract_features(input, mask,
                                                               mask=False,
                                                               precomputed_tdnn=precomputed_tdnn,
                                                               lang=None, mixture=None)
        context = wav2vec_output['x']
        return context

    def forward(self, input, batch_first_output=False, adv_ptb_grad=False, input_ptb=None,
                lang=None, atb=None,
                checkpointing_ffn=False, checkpointing_self_attn=False, **kwargs):
        """
        :param checkpointing_self_attn:
        :param checkpointing_ffn:
        :param atb:
        :param lang:
        :param input_ptb: perturbation added to the input itself
        :param adv_ptb_grad: adversarial perturbation step which we need the gradients w.r.t the input (wavs)
        :param batch_first_output: [bsz, seq_len, hidden_size] as output size, else transpose(0, 1)
        :param input: torch.Tensor [batch_size, sequence_length, 2]
        :param kwargs:
        :return: defaultdict with the encoder context, padding masks and the
            (optional) encoder language prediction
        """
        # The data has been constructed that the first dimension is padding mask
        # 0 for tokens that are not masked, 1 for tokens that are masked
        with torch.no_grad():
            long_mask = input.narrow(2, 0, 1).squeeze(2).eq(0).long()
            input = input.narrow(2, 1, input.size(2) - 1)

        if adv_ptb_grad:
            # first adversarial step: we need d(loss)/d(input)
            input.requires_grad = True

        if input_ptb is not None:
            # second adversarial step: add the normalized perturbation
            assert not adv_ptb_grad
            with torch.no_grad():
                # normalize and add to input / maybe scale over input length?
                # do this under fp32
                with torch.cuda.amp.autocast(enabled=False):
                    epsilon = 1.0
                    input_ptb = input_ptb.float()
                    input_ptb = input_ptb / F.normalize(input_ptb, p=2.0, dim=2)
                    input = input.float() + input_ptb * epsilon

        # H == 1 -> raw waveform; H > 1 -> precomputed (TDNN) features
        if input.size(-1) == 1:
            precomputed_tdnn = False
            input = input.squeeze(-1)
        else:
            precomputed_tdnn = True

        attn_mask = long_mask
        if self.favor:  # favor+ attention
            if self.auto_check_redraw:
                self.proj_updater.redraw_projections()

        quantize_only = False  # self.quantize and not self.dual_output

        # don't mask when precomputed tdnn is used, because spec augmentation is used in the dataset
        wav2vec_output = self.wav2vec_encoder(input, attn_mask,
                                              mask=self.training, features_only=True, layer=None,
                                              precomputed_tdnn=precomputed_tdnn, quantize=self.quantize,
                                              quantize_only=quantize_only,
                                              lang=lang, atb=atb,
                                              checkpointing_ffn=checkpointing_ffn,
                                              checkpointing_self_attn=checkpointing_self_attn)

        # output size is always T x B x C
        continuous_output = wav2vec_output['x']
        time, batch_size = continuous_output.size(0), continuous_output.size(1)
        # mask size is B x T (1 for padded positions, 0 for unpadded)
        dec_attn_mask = wav2vec_output['padding_mask']

        if self.quantize:
            # NOTE(review): self.discrete_encoder is never created in __init__
            # (its construction is commented out), so this branch would raise
            # AttributeError -- confirm before enabling wav2vec2_quantize here.
            quantized_output = wav2vec_output['quantized_x']
            discrete_output = self.discrete_encoder(quantized_output)
            discrete_output = discrete_output.transpose(0, 1).contiguous()
            context = continuous_output + discrete_output
        else:
            context = continuous_output

        if dec_attn_mask is None:
            # batch contains no padding: synthesize an all-zero (= unpadded) mask
            dec_attn_mask = context.new_zeros(batch_size, time).byte()
        else:
            dec_attn_mask = dec_attn_mask.byte()

        wav2vec_context = context
        wav2vec_padding_mask = dec_attn_mask

        output_dict = defaultdict(lambda: None, {'source': input, 'context': context, 'src_mask': dec_attn_mask,
                                                 'src': dec_attn_mask, 'pos_emb': None,
                                                 'wav2vec_context': wav2vec_context,
                                                 'wav2vec_padding_mask': wav2vec_padding_mask,
                                                 'enc_pred_lang': wav2vec_output['pred_lang']})

        return output_dict
class Wav2vecTransformer(Transformer):
    """Transformer encoder-decoder whose encoder is a (pretrained) wav2vec 2.0
    speech encoder. Optionally adds a mirror (reverse-direction) decoder and a
    CTC head over the encoder states.
    """

    def __init__(self, encoder, decoder, generator=None,
                 mirror=False, ctc=False, **kwargs):
        super().__init__(encoder, decoder, generator, None, None, ctc=ctc)
        self.model_size = self.decoder.model_size
        self.switchout = self.decoder.switchout

        if mirror:
            # mirror decoder: an identical decoder run on the reversed target;
            # relies on the module-level ``import copy``
            self.mirror_decoder = copy.deepcopy(self.decoder)
            self.mirror_g = nn.Linear(decoder.model_size, decoder.model_size)
            self.mirror_generator = copy.deepcopy(self.generator)
            # tie the mirror generator's projection to the decoder embedding
            self.mirror_generator[0].linear.weight = self.decoder.word_lut.weight

        if self.ctc:
            # NOTE(review): self.tgt_vocab_size is presumably set by the
            # Transformer base class -- confirm before relying on this branch
            self.ctc_linear = Linear(encoder.model_size, self.tgt_vocab_size)

    def reset_states(self):
        """No persistent (streaming) state to reset for this model."""
        return

    def forward(self, batch, adv_ptb_grad=False, input_ptb=None, factorize=False,
                mirror=False, target_mask=None, **kwargs):
        """
        :param factorize: forwarded to the decoder (factorized weights)
        :param mirror: if True, additionally run the mirror decoder backward
        :param adv_ptb_grad: If we need to tell the model to set input.requires_grad=True (1st step)
        :param input_ptb: 2nd step of adversarial: add the perturbation to input
        :param target_mask: mask over target positions, forwarded to the output dict
        :param batch: data object sent from the dataset
        :return: defaultdict with hidden states, context, masks and log-probs
        """
        if self.switchout > 0 and self.training:
            batch.switchout(self.switchout, self.src_vocab_size, self.tgt_vocab_size)

        src = batch.get('source')
        tgt = batch.get('target_input')
        tgt_pos = batch.get('target_pos')
        src_lang = batch.get('source_lang')
        tgt_lang = batch.get('target_lang')
        src_lengths = batch.src_lengths
        tgt_lengths = batch.tgt_lengths

        org_src = src
        org_tgt = tgt
        src = src.transpose(0, 1)  # transpose to have batch first
        tgt = tgt.transpose(0, 1)

        encoder_output = self.encoder(src, adv_ptb_grad=adv_ptb_grad, input_ptb=input_ptb)
        encoder_output = defaultdict(lambda: None, encoder_output)

        context = encoder_output['context']
        src = encoder_output['src']

        # pass the mask ('src') from the encoder output the decoder as the attention mask
        decoder_output = self.decoder(tgt, context, src,
                                      src_lang=src_lang, tgt_lang=tgt_lang, input_pos=tgt_pos,
                                      src_lengths=src_lengths, tgt_lengths=tgt_lengths,
                                      factorize=factorize)
        decoder_output = defaultdict(lambda: None, decoder_output)
        output = decoder_output['hidden']

        # build the output dict based on decoder output
        output_dict = defaultdict(lambda: None, decoder_output)
        output_dict['hidden'] = output
        output_dict['context'] = context
        output_dict['src_mask'] = encoder_output['src']
        output_dict['src'] = src
        output_dict['target_mask'] = target_mask
        output_dict['target'] = batch.get('target_output')
        output_dict['source'] = encoder_output['source']

        # final layer: computing softmax
        logprobs = self.generator[0](output_dict)['logits']
        output_dict['logprobs'] = logprobs

        # Mirror network: reverse the target sequence and perform backward language model
        if mirror:
            tgt_pos = torch.flip(batch.get('target_pos'), (0,))
            tgt_reverse = torch.flip(batch.get('target'), (0,))
            tgt_reverse_input = tgt_reverse[:-1]
            tgt_reverse_output = tgt_reverse[1:]
            tgt_reverse_input = tgt_reverse_input.transpose(0, 1)

            # perform an additional backward pass
            reverse_decoder_output = self.mirror_decoder(tgt_reverse_input, context, src, src_lang=src_lang,
                                                         tgt_lang=tgt_lang, input_pos=tgt_pos)
            reverse_decoder_output['src'] = src
            reverse_decoder_output['context'] = context
            reverse_decoder_output['target_mask'] = target_mask

            reverse_logprobs = self.mirror_generator[0](reverse_decoder_output)['logits']

            output_dict['reverse_target'] = tgt_reverse_output
            output_dict['reverse_hidden'] = reverse_decoder_output['hidden']
            output_dict['reverse_logprobs'] = reverse_logprobs
            output_dict['target_input'] = batch.get('target_input')
            output_dict['target_lengths'] = batch.tgt_lengths

            # learn weights for mapping (g in the paper)
            output_dict['hidden'] = self.mirror_g(output_dict['hidden'])

        output_dict['reconstruct'] = False

        # compute the logits for each encoder step
        if self.ctc:
            output_dict['encoder_logits'] = self.ctc_linear(output_dict['context'])

        return output_dict

    # load pretrained wav2vec weights
    def load_encoder_weights(self, checkpoint):
        """Load a fairseq checkpoint's weights into the wrapped wav2vec encoder."""
        self.encoder.wav2vec_encoder.load_state_dict(checkpoint['model'])

    def create_decoder_state(self, batch, beam_size=1, type=2, buffering=True,
                             pretrained_layer_states=None, **kwargs):
        """
        Generate a new decoder state based on the batch input
        :param pretrained_layer_states:
        :param buffering:
        :param type:
        :param batch: Batch object (may not contain target during decoding)
        :param beam_size: Size of beam used in beam search
        :return: a TransformerDecodingState
        """
        src = batch.get('source')
        src_pos = batch.get('source_pos')
        tgt_atb = batch.get('target_atbs')
        src_atb = batch.get('source_atbs')
        src_lang = batch.get('source_lang')
        tgt_lang = batch.get('target_lang')

        src_transposed = src.transpose(0, 1)  # transpose -> batch first
        encoder_output = self.encoder(src_transposed)

        # the wav2vec encoder returns its padding mask under 'src'; that mask
        # (time-first) replaces the raw source in the decoding state
        src = encoder_output['src'].transpose(0, 1)
        src_mask = encoder_output['src']

        print("[INFO] create Transformer decoding state with buffering", buffering)
        decoder_state = TransformerDecodingState(src, tgt_lang, encoder_output['context'], src_lang,
                                                 beam_size=beam_size, model_size=self.model_size,
                                                 type=type, buffering=buffering, src_mask=src_mask)

        return decoder_state

    def step(self, input_t, decoder_state, streaming=False):
        """
        Decoding function:
        generate new decoder output based on the current input and current decoder state
        the decoder state is updated in the process
        :param streaming:
        :param input_t: the input word index at time t
        :param decoder_state: object DecoderState containing the buffers required for decoding
        :return: a dictionary containing: log-prob output and the attention coverage
        """
        output_dict = self.decoder.step(input_t, decoder_state, streaming=streaming)
        output_dict['src'] = decoder_state.src.transpose(0, 1)

        log_prob = self.generator[0](output_dict)['logits'].squeeze(0)
        log_prob = torch.nn.functional.log_softmax(log_prob, dim=-1, dtype=torch.float32)

        coverage = output_dict['coverage']
        # keep only the coverage of the most recent decoding step
        last_coverage = coverage[:, -1, :].squeeze(1)

        output_dict['log_prob'] = log_prob
        output_dict['coverage'] = last_coverage

        return output_dict
class Wav2vecBERT(Wav2vecTransformer):
    """Speech-to-text model combining a wav2vec-style acoustic encoder with a
    text decoder that may be a pretrained LM (BERT/RoBERTa/BART/mBART/mBART50/
    DeltaLM) or a plain project Transformer decoder.

    The decoder family is detected at runtime through
    ``decoder.dec_pretrained_model`` and each family gets its own calling
    convention / attention-mask polarity in :meth:`forward`.
    """

    def __init__(self, encoder, decoder, generator=None,
                 mirror=False, ctc=False, encoder_type='wav2vec2',
                 decoder_type='bart',
                 sub_encoder=None, mutual_modality_training=False, **kwargs):
        """
        :param encoder: acoustic encoder module (exposes ``model_size``)
        :param decoder: text decoder module
        :param generator: output projection(s); ``generator[0]`` maps hidden -> logits
        :param mirror: if True, build a reverse (mirror) decoder for backward LM loss
        :param ctc: if True, add a linear layer for CTC over encoder states
        :param encoder_type / decoder_type: bookkeeping strings
        :param sub_encoder: optional frozen text encoder used for a contrastive loss
        :param mutual_modality_training: unused here (kept for interface compatibility)
        """
        super().__init__(encoder, decoder, generator, mirror=mirror, ctc=ctc)
        self.src_vocab_size = 0
        self.encoder_type = encoder_type
        self.decoder_type = decoder_type
        self.sub_encoder = sub_encoder

        if hasattr(decoder, 'dec_pretrained_model') and decoder.dec_pretrained_model:
            # Pretrained decoders expose sizes via a HF-style config; fall back
            # to the generator weight shape when the config lacks those fields.
            try:
                self.model_size = self.decoder.config.bert_hidden_size
                self.tgt_vocab_size = self.decoder.config.vocab_size
            except AttributeError:
                self.model_size = self.decoder.model_size
                self.tgt_vocab_size = self.generator[0].linear.weight.size(0)
            # switchout (word replacement augmentation) is disabled for
            # pretrained decoders.
            self.switchout = 0
        else:
            self.model_size = self.decoder.model_size
            self.tgt_vocab_size = self.decoder.word_lut.weight.size(0)
            self.switchout = self.decoder.switchout

        if mirror:
            # Mirror decoder consumes the target sequence in reverse order;
            # its generator shares the embedding weights with the main decoder.
            self.mirror_decoder = copy.deepcopy(self.decoder)
            self.mirror_g = nn.Linear(decoder.model_size, decoder.model_size)
            self.mirror_generator = copy.deepcopy(self.generator)
            self.mirror_generator[0].linear.weight = self.decoder.word_lut.weight

        if self.ctc:
            self.ctc_linear = nn.Linear(encoder.model_size, self.tgt_vocab_size)

    def forward(self, batch, zero_encoder=False, factorize=False, target_mask=None, mirror=False,
                checkpointing_ffn=False,
                checkpointing_cross_attn=False,
                checkpointing_self_attn=False,
                **kwargs):
        """Run encoder + decoder on a training batch.

        :param checkpointing_self_attn: gradient-checkpoint decoder self-attention
        :param checkpointing_cross_attn: gradient-checkpoint decoder cross-attention
        :param checkpointing_ffn: gradient-checkpoint feed-forward layers
        :param batch: onmt Batch object (tensors are time-first on entry)
        :param zero_encoder: unused here; kept for interface compatibility
        :param factorize: forwarded to non-pretrained decoders
        :param target_mask: stored in the output dict for the loss
        :param mirror: additionally run the reverse (mirror) decoder
        :param kwargs:
        :return: defaultdict with 'logprobs', 'hidden', masks and auxiliary outputs
        """
        if self.switchout > 0 and self.training:
            batch.switchout(self.switchout, self.src_vocab_size, self.tgt_vocab_size)

        src = batch.get('source')
        tgt = batch.get('target_input')
        tgt_pos = batch.get('target_pos')
        src_lang = batch.get('source_lang')
        tgt_lang = batch.get('target_lang')
        src_atb = batch.get('source_atbs')
        tgt_atb = batch.get('target_atbs')
        src_lengths = batch.src_lengths
        tgt_lengths = batch.tgt_lengths

        src = src.transpose(0, 1)  # transpose to have batch first
        tgt = tgt.transpose(0, 1)

        batch_first_output = False
        if hasattr(self.decoder, 'dec_pretrained_model') and self.decoder.dec_pretrained_model in ["bart"]:
            batch_first_output = True

        # during training mixture is always None
        encoder_output = self.encoder(src, batch_first_output=batch_first_output,
                                      lang=src_lang, atb=src_atb,
                                      checkpointing_ffn=checkpointing_ffn,
                                      checkpointing_self_attn=checkpointing_self_attn)

        encoder_output = defaultdict(lambda: None, encoder_output)
        context = encoder_output['context']
        src_attention_mask = encoder_output['src']
        contrastive_loss = 0

        if hasattr(self.decoder, 'dec_pretrained_model') and self.decoder.dec_pretrained_model in ["bert", "roberta"]:
            # src: [b, src_l]  context: [b, src_l, de_model]
            tgt_token_type = tgt.ne(onmt.constants.TGT_PAD).long()  # [bsz, len]
            tgt_attention_mask = tgt.new(*tgt.size()).fill_(1)  # [bsz, len]
            decoder_output = self.decoder(input_ids=tgt,
                                          attention_mask=tgt_attention_mask,
                                          token_type_ids=tgt_token_type,
                                          encoder_hidden_states=context,
                                          encoder_attention_mask=src_attention_mask,
                                          no_offset=True,
                                          )
            decoder_output = decoder_output[0]
            output = decoder_output.transpose(0, 1)  # [bsz, tgt_len, d] => [tgt_len, bsz, d]
            output_dict = defaultdict(lambda: None)
            context = context.transpose(0, 1)  # to [src_l, b, de_model]
        elif hasattr(self.decoder, 'dec_pretrained_model') and self.decoder.dec_pretrained_model in ["bart"]:
            tgt_token_type = tgt.ne(onmt.constants.TGT_PAD).long()  # [bsz, len]
            tgt_attention_mask = tgt.new(*tgt.size()).fill_(1)  # [bsz, len]
            # the wav2vec returned mask is 1 for masked and 0 for un-masked, which is opposite to huggingface
            src_attention_mask = 1 - (src_attention_mask.long())
            decoder_output = self.decoder(input_ids=tgt,
                                          attention_mask=tgt_attention_mask,
                                          encoder_hidden_states=context,
                                          encoder_attention_mask=src_attention_mask)
            decoder_output = decoder_output[0]
            output = decoder_output.transpose(0, 1)  # [bsz, tgt_len, d] => [tgt_len, bsz, d]
            context = context.transpose(0, 1)
            output_dict = defaultdict(lambda: None)
        elif hasattr(self.decoder, 'dec_pretrained_model') and self.decoder.dec_pretrained_model \
                in ["deltalm", "mbart", "mbart50"]:
            if self.sub_encoder is not None:
                # frozen text encoder over the gold target, used for a
                # contrastive objective between speech and text representations
                src_text_input = batch.get('target')
                sub_context_mask = batch.get('tgt_selfattn_mask')
                with torch.no_grad():
                    sub_encoder_output = self.sub_encoder(input_ids=src_text_input,
                                                          attention_mask=sub_context_mask)
                    sub_context = sub_encoder_output[0]
            else:
                sub_context = None
                sub_context_mask = None

            tgt_attention_mask = batch.get('target_input_selfattn_mask')

            if encoder_output['enc_pred_lang'] is not None:
                # soft language posterior from the encoder replaces the hard id
                _src_lang = torch.nn.functional.softmax(encoder_output['enc_pred_lang'], dim=-1, dtype=torch.float32)
            else:
                _src_lang = src_lang

            decoder_outputs = self.decoder(input_ids=tgt,
                                           attention_mask=tgt_attention_mask,
                                           encoder_hidden_states=context,
                                           encoder_attention_mask=src_attention_mask,
                                           sub_encoder_hidden_states=sub_context,
                                           sub_encoder_attention_mask=sub_context_mask,
                                           lang=tgt_lang, atb=tgt_atb,
                                           src_lang=_src_lang,
                                           checkpointing_ffn=checkpointing_ffn,
                                           checkpointing_cross_attn=checkpointing_cross_attn,
                                           checkpointing_self_attn=checkpointing_self_attn)
            decoder_output = decoder_outputs[0]
            output = decoder_output
            output_dict = defaultdict(lambda: None)
        else:
            # pass the mask ('src') from the encoder output the decoder as the attention mask
            decoder_output = self.decoder(tgt, context, src,
                                          src_lang=src_lang, tgt_lang=tgt_lang, input_pos=tgt_pos,
                                          src_lengths=src_lengths, tgt_lengths=tgt_lengths,
                                          factorize=factorize)
            decoder_output = defaultdict(lambda: None, decoder_output)
            output = decoder_output['hidden']
            # BUGFIX: this branch previously never created output_dict, so the
            # shared assignments below raised NameError for non-pretrained decoders.
            output_dict = defaultdict(lambda: None)

        output_dict['hidden'] = output
        output_dict['context'] = context
        output_dict['src_mask'] = encoder_output['src']
        output_dict['src'] = src
        output_dict['target_mask'] = target_mask
        output_dict['target'] = batch.get('target_output')
        output_dict['wav2vec_context'] = encoder_output['wav2vec_context']
        output_dict['wav2vec_padding_mask'] = encoder_output['wav2vec_padding_mask']
        output_dict['enc_pred_lang'] = encoder_output['enc_pred_lang']

        if output_dict['enc_pred_lang'] is not None:
            # NOTE(review): decoder_outputs only exists in the deltalm/mbart
            # branch — confirm enc_pred_lang is only produced in that setup.
            output_dict['dec_pred_lang'] = decoder_outputs[-1]

        # final layer: computing softmax
        logprobs = self.generator[0](output_dict)['logits']
        output_dict['logprobs'] = logprobs

        # Mirror network: reverse the target sequence and perform backward language model
        if mirror:
            tgt_pos = torch.flip(batch.get('target_pos'), (0,))
            tgt_reverse = torch.flip(batch.get('target'), (0,))
            tgt_reverse_input = tgt_reverse[:-1]
            tgt_reverse_output = tgt_reverse[1:]
            tgt_reverse_input = tgt_reverse_input.transpose(0, 1)
            # perform an additional backward pass
            reverse_decoder_output = self.mirror_decoder(tgt_reverse_input, context, src, src_lang=src_lang,
                                                         tgt_lang=tgt_lang, input_pos=tgt_pos)
            reverse_decoder_output['src'] = src
            reverse_decoder_output['context'] = context
            reverse_decoder_output['target_mask'] = target_mask
            reverse_logprobs = self.mirror_generator[0](reverse_decoder_output)['logits']
            output_dict['reverse_target'] = tgt_reverse_output
            output_dict['reverse_hidden'] = reverse_decoder_output['hidden']
            output_dict['reverse_logprobs'] = reverse_logprobs
            output_dict['target_input'] = batch.get('target_input')
            output_dict['target_lengths'] = batch.tgt_lengths
            # learn weights for mapping (g in the paper)
            output_dict['hidden'] = self.mirror_g(output_dict['hidden'])

        output_dict['reconstruct'] = False

        # compute the logits for each encoder step
        if self.ctc:
            # run the ctc output via the wav2vec context (not context)
            output_dict['encoder_logits'] = self.ctc_linear(output_dict['wav2vec_context'])

        if self.sub_encoder is not None:
            # NOTE(review): contrastive_loss is still the int 0 here (the line
            # assigning it from decoder_outputs is disabled), so .transpose will
            # raise — re-enable the decoder-side loss before using sub_encoder.
            # contrastive loss has size: t x b x h (stacked sum from multiple layers)
            contrastive_loss = contrastive_loss.transpose(0, 1).contiguous()
            # the input is the target full without the final token so
            # remove the last time step from the mask
            mask = sub_context_mask[:, :-1].unsqueeze(-1)  # b x t x 1
            contrastive_loss.masked_fill_(mask, 0)  # masked values = zero
            output_dict['contrastive_loss'] = contrastive_loss.sum()

        return output_dict

    def create_decoder_state(self, batch, beam_size=1, type=1, buffering=True, **kwargs):
        """
        Generate a new decoder state based on the batch input
        :param buffering: cache decoder states between steps
        :param type: decoding-state layout selector
        :param batch: Batch object (may not contain target during decoding)
        :param beam_size: Size of beam used in beam search
        :return: TransformerDecodingState
        """
        src = batch.get('source')
        src_lang = batch.get('source_lang')
        tgt_lang = batch.get('target_lang')
        src_atb = batch.get('source_atbs')
        tgt_atb = batch.get('target_atbs')

        encoder_output = self.encoder(src.transpose(0, 1), batch_first_output=False,
                                      lang=src_lang, atb=src_atb)
        src_attention_mask = encoder_output['src']

        # Each decoder family expects a different mask shape/polarity.
        dec_pretrained_model = self.decoder.dec_pretrained_model
        if not dec_pretrained_model:
            mask_src = None
        elif dec_pretrained_model in ["bert", "roberta"]:
            mask_src = src_attention_mask.unsqueeze(1)  # batch_size x 1 x len_src for broadcasting
        elif dec_pretrained_model in ["bart"]:
            # huggingface polarity: 1 = attend, 0 = masked
            mask_src = 1 - (src_attention_mask.long())
        elif dec_pretrained_model in ["deltalm", "mbart", "mbart50"]:
            mask_src = src_attention_mask
        else:
            print("Warning: unknown dec_pretrained_model")
            raise NotImplementedError

        decoder_state = TransformerDecodingState(src, tgt_lang, encoder_output['context'], src_lang,
                                                 beam_size=beam_size, model_size=self.model_size,
                                                 type=type, buffering=buffering, src_mask=mask_src,
                                                 dec_pretrained_model=self.decoder.dec_pretrained_model,
                                                 tgt_atb=tgt_atb)

        return decoder_state

    def tie_weights(self):
        """Share the output projection with the decoder's input embedding."""
        assert self.generator is not None, "The generator needs to be created before sharing weights"
        if hasattr(self.decoder, 'dec_pretrained_model') and self.decoder.dec_pretrained_model in ["bert", "roberta"]:
            self.generator[0].linear.weight = self.decoder.embeddings.word_embeddings.weight
        elif hasattr(self.decoder, 'dec_pretrained_model') and self.decoder.dec_pretrained_model \
                in ["mbart", "mbart50", "deltalm"]:
            self.generator[0].linear.weight = self.decoder.embed_tokens.weight
        else:
            self.generator[0].linear.weight = self.decoder.word_lut.weight

    def decode(self, batch):
        """Gold-scoring (force decoding) is not implemented for this model.

        See the base Transformer's ``decode`` (or git history) for the
        reference implementation this would follow.
        """
        raise NotImplementedError
| 47,708 | 43.839286 | 123 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/wavlm.py | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformers import Transformer, TransformerDecodingState
from typing import List, Optional, Union
from collections import defaultdict
import onmt
from onmt.modules.optimized.linear import Linear
import math
from .fairseq_wav2vec2.file_io import PathManager
from omegaconf import DictConfig, open_dict, OmegaConf
from .fairseq_wav2vec2.utils import overwrite_args_by_name
def load_checkpoint_to_cpu(path, arg_overrides=None, load_on_all_ranks=False):
    """Load a fairseq-style checkpoint onto the CPU (with backward-compat upgrading).

    If doing single-GPU training or if the checkpoint is only being loaded by at
    most one process on each node (current default behavior is for only rank 0
    to read the checkpoint from disk), load_on_all_ranks should be False to
    avoid errors from torch.distributed not having been initialized or
    torch.distributed.barrier() hanging.

    If all processes on each node may be loading the checkpoint
    simultaneously, load_on_all_ranks should be set to True to avoid I/O
    conflicts.

    There's currently no support for > 1 but < all processes loading the
    checkpoint on each node.
    """
    local_path = PathManager.get_local_path(path)

    # A locally cached copy may be stale for remote files that are periodically
    # overwritten in place (e.g. checkpoint_last.pt): drop the cache, sync the
    # ranks if required, and fetch a fresh copy.
    if local_path != path and PathManager.path_requires_pathmanager(path):
        try:
            os.remove(local_path)
        except FileNotFoundError:
            # Several processes may race to remove the same file; a missing
            # file is harmless here (missing_ok only exists from Python 3.8).
            pass
        if load_on_all_ranks:
            torch.distributed.barrier()
        local_path = PathManager.get_local_path(path)

    with open(local_path, "rb") as checkpoint_file:
        state = torch.load(checkpoint_file, map_location=torch.device("cpu"))

    # Apply caller-supplied overrides onto the stored argparse namespace.
    if arg_overrides is not None and state.get("args") is not None:
        stored_args = state["args"]
        for override_name, override_value in arg_overrides.items():
            setattr(stored_args, override_name, override_value)

    return state
class WavLMEncoder(nn.Module):
    """Acoustic encoder wrapping a pretrained WavLM model.

    Loads the WavLM weights from a fairseq-style checkpoint, overrides its
    dropout/SpecAugment settings from the training options, and exposes a
    ``forward`` that returns a dict compatible with the rest of the toolkit.
    """

    def __init__(self, opt, model_path="wav2vec_vox_new.pt",
                 **kwargs):
        """
        :param opt: training options; reads enc_pretrain_emb_dropout,
            ffn_dropout, death_rate, wav2vec_spec_augment, model_size,
            encoder_type, freeze_encoder, freeze_encoder_ffn
        :param model_path: path to the pretrained WavLM checkpoint
        """
        super().__init__()
        # do we need opt for this?
        self.opt = opt
        self.model_path = model_path
        # import fairseq
        # from fairseq.models.wav2vec.wav2vec2 import Wav2Vec2Model
        from .fairseq_wav2vec2.wavlm import WavLM, WavLMConfig
        state = load_checkpoint_to_cpu(model_path)
        self.cfg = WavLMConfig(state['cfg'])

        # Override the checkpoint's regularization settings with the values
        # requested for this training run.
        print("Overiding WavLM dropout ....")
        self.cfg.dropout = self.opt.enc_pretrain_emb_dropout
        self.cfg.activation_dropout = self.opt.ffn_dropout
        self.cfg.encoder_layerdrop = self.opt.death_rate
        # self.cfg.dropout_features = self.opt.emb_dropout

        # SpecAugment-style channel masking; time masking (mask_prob) disabled.
        # self.cfg.mask_channel_before = True
        self.cfg.mask_channel_prob = 0.2 if self.opt.wav2vec_spec_augment else 0.0
        self.cfg.mask_channel_length = 64
        self.cfg.mask_prob = 0.0

        self.wav2vec_encoder = WavLM(self.cfg)

        # load wav2vec weights
        wav2vec_weights = state['model']
        existed_weights = self.wav2vec_encoder.state_dict()

        # if we add new weights/buffers to new model then put them into the state_dict
        # (so load_state_dict doesn't fail on keys absent from the checkpoint)
        keys = existed_weights.keys()
        for key in keys:
            if key not in wav2vec_weights:
                wav2vec_weights[key] = existed_weights[key]

        self.wav2vec_encoder.load_state_dict(wav2vec_weights)

        cfg = self.cfg
        assert self.opt.model_size == cfg.encoder_embed_dim, \
            "Expect self.opt.model_size (%d) and cfg.encoder_embed_dim (%d) to equal " \
            % (self.opt.model_size, cfg.encoder_embed_dim)

        self.input_type = self.opt.encoder_type
        self.model_size = cfg.encoder_embed_dim
        self.time = None

        # freezing the parameters of the Convolutional feature extractors (by default)
        for param in self.wav2vec_encoder.feature_extractor.parameters():
            param.requires_grad = False

        # freeze the whole encoder. needs to do this first before adding customized parameters
        if opt.freeze_encoder:
            print("[INFO] Freezing encoder parameters")
            for p in self.wav2vec_encoder.parameters():
                p.requires_grad = False

        if opt.freeze_encoder_ffn:
            self.freeze_ffn_params()

    def freeze_ffn_params(self):
        """Freeze only the feed-forward (fc1/fc2) weights of every WavLM layer."""
        for layer in self.wav2vec_encoder.encoder.layers:
            for p in layer.fc1.parameters():
                p.requires_grad = False
            for p in layer.fc2.parameters():
                p.requires_grad = False

    def forward(self, input, batch_first_output=False, adv_ptb_grad=False, input_ptb=None,
                lang=None, atb=None,
                checkpointing_ffn=False, checkpointing_self_attn=False, **kwargs):
        """
        :param checkpointing_self_attn:
        :param checkpointing_ffn:
        :param atb:
        :param lang:
        :param input_ptb: perturbation added to the input itself
        :param adv_ptb_grad: adversarial perturbation step which we need the gradients w.r.t the input (wavs)
        :param batch_first_output: [bsz, seq_len, hidden_size] as output size, else transpose(0, 1)
        :param input: torch.Tensor [batch_size, sequence_length, 2]
            (channel 0 carries the padding indicator, the rest is the signal)
        :param kwargs:
        :return: dict with 'context' (T x B x C), padding masks and raw input
        """
        # 0 for tokens that are not masked, 1 for tokens that are masked
        with torch.no_grad():
            # channel 0 == 0 marks padded positions; strip it off the signal
            long_mask = input.narrow(2, 0, 1).squeeze(2).eq(0).long()
            input = input.narrow(2, 1, input.size(2) - 1)

        if adv_ptb_grad:
            # adversarial step: we need d(loss)/d(input)
            input.requires_grad = True

        if input_ptb is not None:
            assert not adv_ptb_grad
            with torch.no_grad():
                # normalize and add to input / maybe scale over input length?
                # do this under fp32
                with torch.cuda.amp.autocast(enabled=False):
                    epsilon = 1.0
                    input_ptb = input_ptb.float()
                    input_ptb = input_ptb / F.normalize(input_ptb, p=2.0, dim=2)
                    input = input.float() + input_ptb * epsilon

        # a trailing feature dim of 1 means raw waveform; more means the TDNN
        # front-end features were precomputed in the data pipeline
        if input.size(-1) == 1:
            precomputed_tdnn = False
            input = input.squeeze(-1)
        else:
            precomputed_tdnn = True

        attn_mask = long_mask
        quantize_only = False  # self.quantize and not self.dual_output

        # don't mask when precomputed tdnn is used, because spec augmentation is used in the dataset
        wav2vec_output = self.wav2vec_encoder(input, attn_mask,
                                              mask=self.training, features_only=True, layer=None,
                                              precomputed_tdnn=precomputed_tdnn,
                                              quantize_only=quantize_only,
                                              lang=lang, atb=atb,
                                              checkpointing_ffn=checkpointing_ffn,
                                              checkpointing_self_attn=checkpointing_self_attn)

        # output size is always T x B x C
        continuous_output = wav2vec_output['x']
        time, batch_size = continuous_output.size(0), continuous_output.size(1)

        # mask size is B x T (1 for padded positions, 0 for unpadded)
        dec_attn_mask = wav2vec_output['padding_mask']

        context = continuous_output

        if dec_attn_mask is None:
            # no padding at all: all-zero (all-unmasked) mask
            dec_attn_mask = context.new_zeros(batch_size, time).byte()
        else:
            dec_attn_mask = dec_attn_mask.byte()

        wav2vec_context = context
        wav2vec_padding_mask = dec_attn_mask

        output_dict = defaultdict(lambda: None, {'source': input, 'context': context, 'src_mask': wav2vec_padding_mask,
                                                 'src': wav2vec_padding_mask, 'pos_emb': None,
                                                 'wav2vec_context': wav2vec_context,
                                                 'wav2vec_padding_mask': wav2vec_padding_mask})

        return output_dict
| 8,507 | 40.300971 | 119 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/conformer_layers.py | import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import math
from onmt.modules.optimized.relative_self_attention import RelativeSelfMultiheadAttn
from onmt.modules.optimized.feed_forward import PositionWiseFeedForward
from onmt.modules.dropout import variational_dropout
from onmt.modules.convolution import ConformerConvBlock
from onmt.models.transformer_layers import PrePostProcessing
from onmt.modules.multilingual_factorized.linear import MFWPositionWiseFeedForward
from onmt.modules.multilingual_factorized.encdec_attention import MFWEncdecMultiheadAttn
from onmt.modules.multilingual_factorized.relative_attention import MFWRelativeSelfMultiheadAttn
class ConformerEncoderLayer(nn.Module):
    """One Conformer encoder block.

    Sub-layer order (all pre-norm): half-step feed-forward (Macaron) ->
    relative-position self-attention -> convolution module -> half-step
    feed-forward. Supports stochastic depth via ``death_rate`` and
    multilingual factorized weights (MFW).
    """

    def __init__(self, opt, death_rate=0.0):
        """
        :param opt: model options (model_size, inner_size, dropout,
            attn_dropout, n_heads, conv_kernel, variational_dropout,
            multilingual_factorized_weights and the mfw_* options)
        :param death_rate: probability of skipping this whole block in training
        """
        super(ConformerEncoderLayer, self).__init__()
        self.opt = opt
        self.variational = opt.variational_dropout
        self.death_rate = death_rate
        self.dropout = opt.dropout
        self.ffn_scale = 0.5  # Macaron half-step scaling for both FFNs
        self.mfw = opt.multilingual_factorized_weights
        self.weight_drop = opt.weight_drop

        # self-attention sub-layer
        self.preprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
        self.postprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
                                                  variational=self.variational)
        if self.mfw:
            self.attn = MFWRelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
                                                     n_languages=opt.n_languages, rank=opt.mfw_rank,
                                                     use_multiplicative=opt.mfw_multiplicative,
                                                     weight_drop=self.weight_drop,
                                                     mfw_activation=opt.mfw_activation)
        else:
            self.attn = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout)

        # first (Macaron) feed-forward sub-layer
        self.preprocess_mcr_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
        self.mcr_feedforward = self._build_feedforward(opt)

        # second feed-forward sub-layer
        # BUGFIX: the mfw/non-mfw branches used to be inverted here (the plain
        # FFN was built when mfw was on and vice versa); both FFNs now go
        # through the same builder.
        self.preprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
        self.feedforward = self._build_feedforward(opt)

        # convolution sub-layer: there is batch norm inside the convolution
        # block already, but the pre-norm is kept as in the original design
        self.preprocess_conv = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
        self.postprocess_conv = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
                                                  variational=self.variational)
        self.conv = ConformerConvBlock(opt.model_size, opt.conv_kernel)

    def _build_feedforward(self, opt):
        """Return a position-wise FFN; the MFW variant when factorized weights are enabled."""
        if self.mfw:
            return MFWPositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
                                              variational=self.variational,
                                              n_languages=opt.n_languages, rank=opt.mfw_rank,
                                              use_multiplicative=opt.mfw_multiplicative,
                                              weight_drop=self.weight_drop,
                                              mfw_activation=opt.mfw_activation)
        return PositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
                                       variational=self.variational)

    def _ffn_dropout(self, out):
        """Apply (variational) dropout after an FFN sub-layer."""
        if self.variational:
            return variational_dropout(out, p=self.dropout, training=self.training)
        return F.dropout(out, p=self.dropout, training=self.training)

    def forward(self, input, pos_emb, attn_mask, incremental=False, incremental_cache=None, mems=None,
                src_lang=None):
        """
        :param input: [T x B x model_size] activations
        :param pos_emb: relative position embeddings
        :param attn_mask: attention mask for padded positions
        :param incremental / incremental_cache: not supported for this layer
        :param mems: unused
        :param src_lang: language indices for the MFW modules
        :return: [T x B x model_size]
        """
        assert incremental is False
        assert incremental_cache is None

        # Stochastic depth: with probability death_rate skip the whole block
        # during training; surviving outputs are rescaled by 1/(1-death_rate)
        # to keep expectations unchanged.
        if self.training and self.death_rate > 0:
            coin = torch.rand(1)[0].item() >= self.death_rate
            ffn_scale = self.ffn_scale / (1 - self.death_rate)
        else:
            coin = True
            ffn_scale = self.ffn_scale

        if not coin:
            return input

        # 1) first (Macaron) half-step feed-forward
        out = self.mcr_feedforward(self.preprocess_mcr_ffn(input), src_lang)
        out = self._ffn_dropout(out * ffn_scale)
        input = input + out

        # 2) relative-position self-attention
        attn_input = self.preprocess_attn(input)
        if self.mfw:
            out, _ = self.attn(attn_input, pos_emb, src_lang, attn_mask, None)
        else:
            out, _ = self.attn(attn_input, pos_emb, attn_mask, None)
        if self.training and self.death_rate > 0:
            out = out / (1 - self.death_rate)
        input = self.postprocess_attn(out, input)

        # 3) convolution module
        out = self.conv(self.preprocess_conv(input))
        if self.training and self.death_rate > 0:
            out = out / (1 - self.death_rate)
        input = self.postprocess_conv(out, input)

        # 4) second half-step feed-forward
        out = self.feedforward(self.preprocess_ffn(input), src_lang)
        out = self._ffn_dropout(out * ffn_scale)
        return input + out
| 6,311 | 43.450704 | 109 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/relative_transformer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PositionalEncoding
from onmt.modules.pre_post_processing import PrePostProcessing
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState
from onmt.modules.sinusoidal_positional_encoding import SinusoidalPositionalEmbedding
import onmt
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from onmt.modules.dropout import embedded_dropout
from .relative_transformer_layers import RelativeTransformerEncoderLayer, RelativeTransformerDecoderLayer
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
import sys
from onmt.modules.checkpoint import checkpoint
# from torch.utils.checkpoint import checkpoint
from onmt.modules.identity import Identity
torch.set_printoptions(threshold=500000)
def create_forward_function(module):
    """Wrap *module* in a plain callable that forwards all positional args.

    Used to hand an ``nn.Module``'s call to checkpointing utilities that
    expect a bare function.
    """
    return lambda *inputs: module(*inputs)
class SpeechTransformerEncoder(TransformerEncoder):
    """Transformer encoder for speech input using relative-position attention.

    Consumes audio features (optionally downsampled by a CNN front-end from
    the parent class), adds relative positional information, and runs the
    stack of RelativeTransformerEncoderLayer blocks.
    """

    def __init__(self, opt, dicts, positional_encoder, encoder_type='text', language_embeddings=None):
        # Attributes must be set before super().__init__ because the parent
        # constructor calls build_modules(), which reads them.
        self.death_rate = opt.death_rate
        self.learnable_position_encoding = opt.learnable_position_encoding
        self.layer_modules = list()
        self.asynchronous = opt.asynchronous
        self.max_memory_size = opt.max_memory_size
        self.extra_context_size = opt.extra_context_size
        self.experimental = opt.experimental
        self.unidirectional = opt.unidirectional
        self.reversible = opt.src_reversible
        self.n_heads = opt.n_heads
        self.fast_self_attn = opt.fast_self_attention
        self.checkpointing = opt.checkpointing
        self.mpw = opt.multilingual_partitioned_weights
        self.multilingual_linear_projection = opt.multilingual_linear_projection
        self.mln = opt.multilingual_layer_norm
        self.no_input_scale = opt.no_input_scale
        # NOTE(review): learnable_position_encoding is assigned twice in this
        # constructor (same value); harmless but redundant.
        self.learnable_position_encoding = opt.learnable_position_encoding
        self.rotary_position_encoding = opt.rotary_position_encoding
        self.max_pos_length = opt.max_pos_length

        # TODO: multilingually linear transformation

        # build_modules will be called from the inherited constructor
        super().__init__(opt, dicts, positional_encoder, encoder_type, language_embeddings)

        # learnable position encoding
        if self.learnable_position_encoding:
            assert not self.rotary_position_encoding
            self.positional_encoder = None
        elif self.rotary_position_encoding:
            from onmt.modules.rotary_postional_encodings import SinusoidalEmbeddings
            self.positional_encoder = SinusoidalEmbeddings(opt.model_size // opt.n_heads)
        else:
            # or using pre-set sinusoidal
            self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)

        self.d_head = self.model_size // self.n_heads

        if self.multilingual_linear_projection:
            # per-language output projection matrices, Xavier-style init
            self.linear_proj = nn.Parameter(torch.Tensor(opt.n_languages, self.model_size, self.model_size))

            std_ = math.sqrt(2.0 / (self.model_size + self.model_size))
            torch.nn.init.normal_(self.linear_proj, 0.0, std_)

        # NOTE(review): mln is also assigned twice (here and above).
        self.mln = opt.multilingual_layer_norm

        if not opt.rezero:
            self.postprocess_layer = PrePostProcessing(opt.model_size, opt.dropout, sequence='n', multilingual=self.mln,
                                                       n_languages=opt.n_languages)
        else:
            # ReZero networks skip the final layer norm
            self.postprocess_layer = Identity()

    def build_modules(self):
        """Instantiate the encoder layer stack with linearly increasing LayerDrop."""
        e_length = expected_length(self.layers, self.death_rate)
        print("* Transformer Encoder with Relative Attention with %.2f expected layers" % e_length)
        if self.unidirectional:
            print("* Running a unidirectional Encoder.")

        self.layer_modules = nn.ModuleList()

        for _l in range(self.layers):
            # linearly decay the death rate
            death_r = (_l + 1.0) / self.layers * self.death_rate

            block = RelativeTransformerEncoderLayer(self.opt, death_rate=death_r)
            self.layer_modules.append(block)

    def forward(self, input, input_pos=None, input_lang=None, streaming=False, factorize=True,
                return_states=False, pretrained_layer_states=None, **kwargs):
        """
        :param pretrained_layer_states: optional residual added to the first layer input
        :param return_states: also return the (unnormalized) outputs of each state
        :param factorize:
        :param input: [B x T x Input_Size]
        :param input_pos: [B x T] positions
        :param input_lang: [B] language ids of each sample
        :param streaming: connect different segments in transformer-xl style
        :param kwargs:
        :return: dict with 'context' [T x B x H], masks and position embeddings
        """
        with torch.no_grad():
            # sanitize NaNs coming from the feature pipeline
            nan_mask = torch.isnan(input)
            if nan_mask.any():
                input.masked_fill_(nan_mask, 0)

        if not self.cnn_downsampling:
            # channel 0 carries the padding indicator; strip it and project
            # the raw features with the linear audio_trans
            mask_src = input.narrow(2, 0, 1).squeeze(2).transpose(0, 1).eq(onmt.constants.PAD).unsqueeze(0)
            dec_attn_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
            input = input.narrow(2, 1, input.size(2) - 1)
            emb = self.audio_trans(input.contiguous().view(-1, input.size(2))).view(input.size(0),
                                                                                    input.size(1), -1)
            emb = emb.type_as(input)
        else:
            long_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
            input = input.narrow(2, 1, input.size(2) - 1)

            # first resizing to fit the CNN format
            # note that this is actually conv2d so channel=1, f=40
            input = input.view(input.size(0), input.size(1), -1, self.channels)
            input = input.permute(0, 3, 1, 2)  # [bsz, channels, time, f]

            # apply CNN
            input = self.audio_trans(input)

            input = input.permute(0, 2, 1, 3).contiguous()
            input = input.view(input.size(0), input.size(1), -1)
            input = self.linear_trans(input)

            # the CNN downsamples time by 4x, so subsample the mask accordingly
            dec_attn_mask = long_mask[:, 0:input.size(1) * 4:4].unsqueeze(1)
            mask_src = long_mask[:, 0:input.size(1) * 4:4].transpose(0, 1).unsqueeze(0)
            # the size seems to be B x T ?
            emb = input

        emb = emb.transpose(0, 1)
        input = input.transpose(0, 1)
        abs_pos = None
        mem_len = 0
        mems = None

        if self.unidirectional:
            # causal mask on top of the padding mask
            qlen = input.size(0)
            klen = qlen + mem_len
            attn_mask_src = torch.triu(
                emb.new_ones(qlen, klen), diagonal=1 + mem_len).byte()[:, :, None]

            pad_mask = mask_src

            mask_src = pad_mask + attn_mask_src
            # dec_attn_mask = dec_attn_mask + pad_mask.unsqueeze(0)
            mask_src = mask_src.gt(0)

        mask_src = mask_src.bool()

        """ Scale the emb by sqrt(d_model) """
        if not self.no_input_scale:
            emb = emb * math.sqrt(self.model_size)

        """ Adding positional encoding """
        qlen = input.size(0)
        klen = qlen + mem_len

        if not self.learnable_position_encoding:
            # sinusoidal relative positions: distances klen-1 .. -(klen-1)
            pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)

            # pos_emb has size 2T+1 x 1 x H
            pos_emb = self.positional_encoder(pos, bsz=input.size(1))
            pos_emb = self.preprocess_layer(pos_emb)
        else:
            # learnable embeddings: pass clipped distance indices instead
            range_vec = torch.arange(klen, device=emb.device)
            range_mat = range_vec.unsqueeze(-1).expand(-1, klen).transpose(0, 1)
            distance_mat = range_vec - range_mat.transpose(0, 1)
            distance_mat.clamp_(-self.max_pos_length, self.max_pos_length).add_(self.max_pos_length)
            pos_emb = distance_mat

        # B x T x H -> T x B x H
        context = emb

        if streaming:
            hids = [context]

        # Apply dropout to both context and pos_emb
        context = self.preprocess_layer(context)

        # maybe try multiplicative ...
        # adding the speech represetation into the first input
        if pretrained_layer_states is not None:
            context = context + pretrained_layer_states

        layer_states = dict()

        if self.mpw:
            input_lang = self.factor_embeddings(input_lang).squeeze(0)
            assert input_lang.ndim == 1

        if self.reversible:
            context = torch.cat([context, context], dim=-1)

            assert streaming is not True, "Streaming and Reversible is not usable yet."
            # NOTE(review): ReversibleEncoderFunction is not imported in the
            # visible import block — confirm it is available at runtime.
            context = ReversibleEncoderFunction.apply(context, pos_emb, self.layer_modules, mask_src)
        else:
            for i, layer in enumerate(self.layer_modules):
                # src_len x batch_size x d_model
                mems_i = mems[i] if mems is not None and streaming and self.max_memory_size > 0 else None
                context = layer(context, pos_emb, mask_src, mems=mems_i, src_lang=input_lang, factorize=factorize)

                # Summing the context
                # if pretrained_layer_states is not None and i == (self.layers - 1):
                #     context = context + pretrained_layer_states[i]

                if streaming:
                    hids.append(context)

        if return_states:
            layer_states = context

        # final layer norm
        context = self.postprocess_layer(context, factor=input_lang)

        if self.multilingual_linear_projection:
            # apply the per-language projection matrix
            language_linear_weight_ = torch.index_select(self.linear_proj, 0, input_lang).squeeze(0)
            # context = F.linear(context, language_linear_weight_)
            t, b = context.size(0), context.size(1)
            context = torch.mm(context.view(-1, context.size(-1)), language_linear_weight_)
            context = context.view(t, b, context.size(-1))

        output_dict = defaultdict(lambda: None, {'context': context, 'src_mask': dec_attn_mask,
                                                 'src': input, 'pos_emb': pos_emb})

        if return_states:
            output_dict['layer_states'] = layer_states

        if streaming:
            # NOTE(review): streaming_state is never defined in this method —
            # the streaming path would raise NameError; presumably it should be
            # taken from kwargs as in other encoders. Confirm before enabling.
            # streaming_state.prev_src_mem_size += sum(input_length.tolist())
            # streaming_state.prune_source_memory(self.max_memory_size)
            streaming_state.update_src_mems(hids, qlen)
            output_dict['streaming_state'] = streaming_state

        return output_dict
class SpeechTransformerDecoder(TransformerDecoder):
    """Transformer decoder with relative-position attention for speech models.

    Visible extensions over the base TransformerDecoder: stochastic layer
    depth (death_rate), streaming decoding with bounded memory, multilingual
    partitioned weights (mpw), and either learnable or sinusoidal relative
    position encodings.
    """

    def __init__(self, opt, dicts, positional_encoder, language_embeddings=None, ignore_source=False):
        # These attributes are assigned before calling the parent constructor
        # because the parent constructor invokes build_modules(), which reads them.
        self.death_rate = opt.death_rate
        self.max_memory_size = opt.max_memory_size
        self.stream_context = opt.stream_context
        self.extra_context_size = opt.extra_context_size
        self.n_heads = opt.n_heads
        self.fast_self_attn = opt.fast_self_attention
        self.mpw = opt.multilingual_partitioned_weights
        self.learnable_position_encoding = opt.learnable_position_encoding
        self.max_pos_length = opt.max_pos_length

        # build_modules will be called from the inherited constructor
        super().__init__(opt, dicts, positional_encoder, language_embeddings,
                         ignore_source,
                         allocate_positions=False)

        # With learnable position encodings the relative distances are looked up
        # inside the attention modules, so no separate encoder module is needed.
        if self.learnable_position_encoding:
            self.positional_encoder = None
        else:
            self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
        self.d_head = self.model_size // self.n_heads

        # Parameters for the position biases - deprecated. kept for backward compatibility
        # self.r_w_bias = nn.Parameter(torch.Tensor(self.n_heads, self.d_head))
        # self.r_r_bias = nn.Parameter(torch.Tensor(self.n_heads, self.d_head))
        self.mln = opt.multilingual_layer_norm

        if not opt.rezero:
            self.postprocess_layer = PrePostProcessing(opt.model_size, opt.dropout, sequence='n', multilingual=self.mln,
                                                       n_languages=opt.n_languages)
        else:
            self.postprocess_layer = Identity()

    def renew_buffer(self, new_len):
        # Relative attention does not pre-allocate position buffers,
        # so there is nothing to resize here.
        return

    def build_modules(self):
        # NOTE(review): death_rate is overwritten with 0.0 here, so stochastic
        # depth is effectively disabled for this decoder — confirm intent.
        self.death_rate = 0.0
        e_length = expected_length(self.layers, self.death_rate)
        self.opt.ignore_source = self.ignore_source
        opt = self.opt
        print("* Speech Transformer Decoder with Relative Attention with %.2f layers" % e_length)
        self.layer_modules = nn.ModuleList()

        for _l in range(self.layers):
            # linearly decay the death rate
            death_r = (_l + 1.0) / self.layers * self.death_rate
            block = RelativeTransformerDecoderLayer(self.opt, death_rate=death_r)
            self.layer_modules.append(block)

    def process_embedding(self, input, input_lang=None):
        # Embeddings require no extra processing in this decoder.
        return input

    # TODO: merging forward_stream and forward
    # TODO: write a step function for encoder
    def forward(self, input, context, src, input_pos=None,
                src_lang=None, tgt_lang=None, streaming=False, factorize=True, **kwargs):
        """
        Inputs Shapes:
            input: (Variable) batch_size x len_tgt (wanna tranpose)
            context: (Variable) batch_size x src_len x d_model
            mask_src (Tensor) batch_size x src_len
        Outputs Shapes:
            out: batch_size x len_tgt x d_model
            coverage: batch_size x len_tgt x src_len

        """

        """ Embedding: batch_size x len_tgt x d_model """
        input = input.transpose(0, 1)  # T x B
        emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
        emb = emb * math.sqrt(self.model_size)

        mem_len = 0
        mems = None
        extra_context = None

        if self.use_language_embedding:
            lang_emb = self.language_embeddings(tgt_lang)  # B x H or 1 x H
            if self.language_embedding_type == 'sum':
                emb = emb + lang_emb
            elif self.language_embedding_type == 'concat':
                lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
                concat_emb = torch.cat([emb, lang_emb], dim=-1)
                emb = torch.relu(self.projector(concat_emb))
            else:
                raise NotImplementedError

        # Build the source padding mask; its shape depends on the encoder type.
        if context is not None:
            if self.encoder_type == "audio":
                if not self.encoder_cnn_downsampling:
                    mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
                else:
                    # CNN downsampling reduces time resolution by a factor of 4,
                    # so the mask is subsampled accordingly.
                    long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
                    mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
            elif self.encoder_type in ["wav2vec2", "wav2vec2_scp"]:
                # wav2vec encoders already pass the mask through as `src`
                mask_src = src
            else:
                mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
        else:
            mask_src = None

        qlen = input.size(0)
        klen = qlen + mem_len

        # preparing self-attention mask. The input must be left-aligned
        dec_attn_mask = torch.triu(
            emb.new_ones(qlen, klen), diagonal=1 + mem_len).byte()[:, :, None]
        dec_attn_mask = dec_attn_mask.bool()

        if not self.learnable_position_encoding:
            # descending distances for sinusoidal relative positions
            pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
            pos_emb = self.positional_encoder(pos, bsz=input.size(1))
            pos_emb = self.preprocess_layer(pos_emb)
        else:
            # matrix of clamped pairwise distances, shifted to be non-negative
            # so it can index a learnable embedding table inside the attention
            range_vec = torch.arange(klen, device=emb.device)
            range_mat = range_vec.unsqueeze(-1).expand(-1, klen).transpose(0, 1)
            distance_mat = range_vec - range_mat.transpose(0, 1)
            distance_mat.clamp_(-self.max_pos_length, self.max_pos_length).add_(self.max_pos_length)
            pos_emb = distance_mat
            # pos_emb = self.positional_encoder(pos, bsz=input.size(1))

        output = self.preprocess_layer(emb.contiguous())
        # pos_emb = self.preprocess_layer(pos_emb)

        if self.mpw:
            # map language ids to factor vectors for partitioned weights
            src_lang = self.factor_embeddings(src_lang).squeeze(0)
            tgt_lang = self.factor_embeddings(tgt_lang).squeeze(0)
            assert src_lang.ndim == 1 and tgt_lang.ndim == 1

        for i, layer in enumerate(self.layer_modules):
            output, coverage, _ = layer(output, context, pos_emb, dec_attn_mask, mask_src,
                                        src_lang=src_lang, tgt_lang=tgt_lang, factorize=factorize)

        # final layer norm
        output = self.postprocess_layer(output, factor=tgt_lang)

        output_dict = {'hidden': output, 'coverage': coverage, 'context': context}
        output_dict = defaultdict(lambda: None, output_dict)

        return output_dict

    def step(self, input, decoder_state, streaming=False):
        """
        Inputs Shapes:
            input: (Variable) batch_size x len_tgt (wanna tranpose)
            context: (Variable) batch_size x src_len x d_model
            mask_src (Tensor) batch_size x src_len
            buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
        Outputs Shapes:
            out: batch_size x len_tgt x d_model
            coverage: batch_size x len_tgt x src_len

        """
        context = decoder_state.context
        buffers = decoder_state.attention_buffers
        lang = decoder_state.tgt_lang
        src_lang = decoder_state.src_lang
        buffering = decoder_state.buffering

        if decoder_state.concat_input_seq:
            if decoder_state.input_seq is None:
                decoder_state.input_seq = input
            else:
                # concatenate the last input to the previous input sequence
                decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
            input = decoder_state.input_seq.transpose(0, 1)  # B x T

        src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
        src_mask = decoder_state.src_mask if decoder_state.src_mask is not None else None

        if buffering:
            # use the last value of input to continue decoding
            if input.size(1) > 1:
                input_ = input[:, -1].unsqueeze(1).transpose(0, 1)
            else:
                input_ = input.transpose(0, 1)
        else:
            input_ = input.transpose(0, 1)  # from B x T to T x B

        """ Embedding: batch_size x 1 x d_model """
        emb = self.word_lut(input_) * math.sqrt(self.model_size)
        input = input.transpose(0, 1)
        klen = input.size(0)
        # emb = self.word_lut(input) * math.sqrt(self.model_size)

        if self.use_language_embedding:
            lang_emb = self.language_embeddings(lang)  # B x H

            if self.language_embedding_type in ['sum', 'all_sum']:
                emb = emb + lang_emb
            elif self.language_embedding_type == 'concat':
                if input.size(0) == 1:
                    emb[0] = lang_emb

                lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
                concat_emb = torch.cat([emb, lang_emb], dim=-1)
                emb = torch.relu(self.projector(concat_emb))
            else:
                raise NotImplementedError

        # prepare position encoding
        qlen = emb.size(0)
        mlen = klen - qlen

        # pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
        # pos_emb = self.positional_encoder(pos)

        if self.learnable_position_encoding:
            if buffering:
                # only the distances for the newest query position are needed
                distance_mat = torch.arange(-klen + 1, 1, 1, device=emb.device).unsqueeze(0)
            else:
                range_vec = torch.arange(klen, device=emb.device)
                range_mat = range_vec.unsqueeze(-1).expand(-1, klen).transpose(0, 1)
                distance_mat = range_vec - range_mat.transpose(0, 1)
            distance_mat.clamp_(-self.max_pos_length, self.max_pos_length).add_(self.max_pos_length)
            pos_emb = distance_mat
        else:
            pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
            pos_emb = self.positional_encoder(pos)

        dec_attn_mask = torch.triu(
            emb.new_ones(klen, klen), diagonal=1 + mlen).byte()[:, :, None]  # [:, :, None]

        if buffering:
            # only the newest target step attends during incremental decoding
            dec_attn_mask = dec_attn_mask[-1].unsqueeze(0)

        dec_attn_mask = dec_attn_mask.bool()

        if context is not None:
            if self.encoder_type == "audio":
                # The "slow" version of translator only keeps the source mask of audio as src
                # Thats why we need to check if the src has already been narrowed before
                if src.dim() == 3:
                    if not self.encoder_cnn_downsampling:
                        mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
                    else:
                        long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
                        mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
                elif self.encoder_cnn_downsampling:
                    long_mask = src.eq(onmt.constants.PAD)
                    mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
                else:
                    mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
            elif self.encoder_type == "wav2vec2":
                # mask_src = src
                mask_src = src_mask
            else:
                mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
        else:
            mask_src = None

        output = emb.contiguous()

        for i, layer in enumerate(self.layer_modules):
            buffer = buffers[i] if i in buffers else None

            if buffering:
                output, coverage, buffer = layer(output, context, pos_emb, dec_attn_mask, mask_src,
                                                 tgt_lang=lang, src_lang=src_lang,
                                                 incremental=True, incremental_cache=buffer)
                decoder_state.update_attention_buffer(buffer, i)
            else:
                output, coverage, _ = layer(output, context, pos_emb, dec_attn_mask, mask_src,
                                            tgt_lang=lang, src_lang=src_lang)

        # normalize and take the last time step
        output = self.postprocess_layer(output, factor=lang)
        output = output[-1].unsqueeze(0)

        output_dict = defaultdict(lambda: None)
        output_dict['hidden'] = output
        output_dict['coverage'] = coverage
        output_dict['context'] = context

        return output_dict
class RelativeTransformer(Transformer):
    """Transformer wrapper whose decoder state supports (streaming) decoding
    with relative-position attention.

    Fix: the final statement of set_memory_size had stray dataset metadata
    ("| 28,183 | 42.293395 | 120 | py") fused onto it; the junk is removed.
    """

    def create_decoder_state(self, batch, beam_size=1, type=1, streaming=False, previous_decoding_state=None,
                             factorize=True,
                             pretrained_layer_states=None, **kwargs):
        """
        Generate a new decoder state based on the batch input
        :param factorize:
        :param pretrained_layer_states:
        :param previous_decoding_state:
        :param streaming:
        :param type:
        :param batch: Batch object (may not contain target during decoding)
        :param beam_size: Size of beam used in beam search
        :return: a TransformerDecodingState or StreamDecodingState
        """
        # in this case batch size should be 1
        src = batch.get('source')
        src_pos = batch.get('source_pos')
        src_lang = batch.get('source_lang')
        tgt_lang = batch.get('target_lang')
        src_lengths = batch.src_lengths
        src_transposed = src.transpose(0, 1)

        if previous_decoding_state is None:
            # First segment of a stream (or plain translation):
            # start from a fresh stream state.
            streaming_state = self.init_stream()

            encoder_output = self.encoder(src_transposed, input_pos=src_pos,
                                          input_lang=src_lang, src_lengths=src_lengths,
                                          streaming=streaming, streaming_state=streaming_state,
                                          factorize=factorize, pretrained_layer_states=pretrained_layer_states)

            if streaming:
                decoder_state = StreamDecodingState(src, tgt_lang, encoder_output['context'],
                                                    encoder_output['src_mask'],
                                                    beam_size=beam_size, model_size=self.model_size, type=type,
                                                    cloning=True, streaming_state=streaming_state)
            else:
                decoder_state = TransformerDecodingState(src, tgt_lang, encoder_output['context'],
                                                         encoder_output['src_mask'],
                                                         beam_size=beam_size, model_size=self.model_size, type=type)
        else:
            streaming_state = previous_decoding_state.streaming_state

            # to have the same batch/beam size with the previous memory ..
            src_transposed = src_transposed.repeat(beam_size, 1)
            src = src.repeat(1, beam_size)

            encoder_output = self.encoder(src_transposed, input_pos=src_pos,
                                          input_lang=src_lang, src_lengths=src_lengths,
                                          streaming=True, streaming_state=streaming_state)

            context = encoder_output['context']

            if self.decoder.extra_context_size > 0:
                # Prepend the tail of the previous segment's context (and source)
                # so cross-attention can look slightly beyond the segment boundary.
                prev_context = previous_decoding_state.context
                extra_context = prev_context[-self.decoder.extra_context_size:].detach()
                context = torch.cat([extra_context, context], dim=0)

                prev_src = previous_decoding_state.src[-self.decoder.extra_context_size:].detach()
                src = torch.cat([prev_src, src], dim=0)

            decoder_state = StreamDecodingState(src, tgt_lang, context,
                                                encoder_output['src_mask'],
                                                beam_size=beam_size, model_size=self.model_size, type=type,
                                                cloning=False, streaming_state=streaming_state)

        return decoder_state

    def init_stream(self):
        """Create an empty StreamState matching the decoder depth, device and dtype."""
        param = next(self.parameters())
        layers = self.decoder.layers
        streaming_state = StreamState(layers, self.decoder.max_memory_size, param.device, param.dtype)
        return streaming_state

    def step(self, input_t, decoder_state, streaming=False):
        """
        Decoding function:
        generate new decoder output based on the current input and current decoder state
        the decoder state is updated in the process
        :param streaming:
        :param input_t: the input word index at time t
        :param decoder_state: object DecoderState containing the buffers required for decoding
        :return: a dictionary containing: log-prob output and the attention coverage
        """
        output_dict = self.decoder.step(input_t, decoder_state, streaming=streaming)
        output_dict['src'] = decoder_state.src.transpose(0, 1)

        log_prob = self.generator[0](output_dict).squeeze(0)
        log_prob = F.log_softmax(log_prob, dim=-1, dtype=torch.float32)

        coverage = output_dict['coverage']
        # keep only the coverage of the newest target step
        last_coverage = coverage[:, -1, :].squeeze(1)

        output_dict['log_prob'] = log_prob
        output_dict['coverage'] = last_coverage

        return output_dict

    def set_memory_size(self, src_memory_size, tgt_memory_size):
        """Set the maximum streaming memory size for encoder (source) and decoder (target)."""
        self.encoder.max_memory_size = src_memory_size
        self.decoder.max_memory_size = tgt_memory_size
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/relative_transformer_layers.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import onmt
from onmt.modules.pre_post_processing import PrePostProcessing
from onmt.modules.linear import FeedForward
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.dropout import VariationalDropout
from onmt.modules.optimized.encdec_attention import EncdecMultiheadAttn
from onmt.modules.optimized.feed_forward import PositionWiseFeedForward
from onmt.modules.optimized.relative_self_attention import RelativeSelfMultiheadAttn
from onmt.modules.multilingual_factorized.linear import MFWPositionWiseFeedForward
from onmt.modules.multilingual_factorized.encdec_attention import MFWEncdecMultiheadAttn
from onmt.modules.multilingual_factorized.relative_attention import MFWRelativeSelfMultiheadAttn
from onmt.modules.multilingual_partitioned.linear import MPPositionWiseFeedForward
from onmt.modules.multilingual_partitioned.encdec_attention import MPEncdecMultiheadAttn
from onmt.modules.multilingual_partitioned.relative_attention import MPRelativeSelfMultiheadAttn
from onmt.modules.convolution import ConformerConvBlock
from onmt.modules.identity import Identity
class LIDFeedForward(nn.Module):
    """Stub for a language-ID feed-forward module.

    NOTE(review): unfinished — it accepts arbitrary positional arguments but
    ignores them and only initialises the nn.Module base. Confirm whether this
    class is still needed.
    """

    def __init__(self, *args):
        super().__init__()
def preprocessing(rezero, *args, **kwargs):
    """Build the pre/post-processing module for a sub-layer.

    With rezero residuals the usual layer norm is replaced by an Identity;
    otherwise a PrePostProcessing module is constructed from the remaining
    arguments.
    """
    if not rezero:
        return PrePostProcessing(*args, **kwargs)
    return Identity()
class RelativeTransformerEncoderLayer(nn.Module):
    """A single encoder layer with relative-position self-attention.

    Optional components, all driven by opt: macaron-style twin feed-forward
    blocks, a depthwise Conformer convolution block, multilingual factorized
    (mfw) or partitioned (mpw) weights, multilingual adapters, rezero
    residuals and stochastic layer depth (death_rate).
    """

    def __init__(self, opt, death_rate=0.0, **kwargs):
        super(RelativeTransformerEncoderLayer, self).__init__()
        self.variational = opt.variational_dropout
        self.death_rate = death_rate
        self.fast_self_attention = opt.fast_self_attention
        self.depthwise_conv = opt.depthwise_conv
        self.mfw = opt.multilingual_factorized_weights
        self.mpw = opt.multilingual_partitioned_weights
        self.mln = opt.multilingual_layer_norm
        self.no_ffn = opt.no_ffn
        self.weight_drop = opt.weight_drop
        self.multilingual_adapter = opt.multilingual_adapter
        self.adapter_bottleneck_size = opt.adapter_bottleneck_size
        self.macaron = opt.macaron
        # macaron layers use two half-weighted FFN blocks
        self.ffn_scale = 0.5 if self.macaron else 1
        self.rezero = opt.rezero
        self.learnable_pos = opt.learnable_position_encoding
        # negative values mean "fall back to the global dropout rate"
        self.residual_dropout = opt.residual_dropout if opt.residual_dropout >= 0 else opt.dropout
        self.ffn_dropout = opt.ffn_dropout if opt.ffn_dropout >= 0 else opt.dropout

        if self.macaron:
            self.preprocess_mcr_ffn = preprocessing(self.rezero, opt.model_size, 0.0,
                                                    multilingual=self.mln, sequence='n', n_languages=opt.n_languages)
            self.postprocess_mcr_ffn = PrePostProcessing(opt.model_size, self.residual_dropout,
                                                         sequence='dz' if self.rezero else 'da',
                                                         variational=self.variational)

            if self.mfw:
                self.mcr_feedforward = MFWPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                                  variational=self.variational,
                                                                  n_languages=opt.n_languages, rank=opt.mfw_rank,
                                                                  use_multiplicative=opt.mfw_multiplicative,
                                                                  activation=opt.ffn_activation,
                                                                  glu=opt.ffn_glu)
            else:
                self.mcr_feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                               variational=self.variational,
                                                               activation=opt.ffn_activation,
                                                               glu=opt.ffn_glu)

        if self.mfw:
            assert not self.mpw, "[ERROR] factorized and partitioned weights cannot be used at the same time."

        self.preprocess_attn = preprocessing(self.rezero, opt.model_size, 0.0,
                                             multilingual=self.mln, sequence='n', n_languages=opt.n_languages)
        self.postprocess_attn = PrePostProcessing(opt.model_size, self.residual_dropout,
                                                  sequence='dz' if self.rezero else 'da',
                                                  variational=self.variational)

        if not self.no_ffn:
            self.preprocess_ffn = preprocessing(self.rezero, opt.model_size, 0.0,
                                                multilingual=self.mln, sequence='n', n_languages=opt.n_languages)
            self.postprocess_ffn = PrePostProcessing(opt.model_size, self.residual_dropout,
                                                     sequence='dz' if self.rezero else 'da',
                                                     variational=self.variational)

        d_head = opt.model_size // opt.n_heads

        # Pick the feed-forward / self-attention implementation matching the
        # multilingual weight scheme (factorized, partitioned, or plain).
        if self.mfw:
            if not self.no_ffn:
                self.feedforward = MFWPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                              variational=self.variational,
                                                              n_languages=opt.n_languages, rank=opt.mfw_rank,
                                                              use_multiplicative=opt.mfw_multiplicative,
                                                              weight_drop=self.weight_drop,
                                                              mfw_activation=opt.mfw_activation,
                                                              activation=opt.ffn_activation,
                                                              glu=opt.ffn_glu)

            self.multihead = MFWRelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
                                                          learnable_pos=self.learnable_pos,
                                                          max_pos=opt.max_pos_length,
                                                          n_languages=opt.n_languages, rank=opt.mfw_rank,
                                                          use_multiplicative=opt.mfw_multiplicative,
                                                          weight_drop=self.weight_drop,
                                                          mfw_activation=opt.mfw_activation)
        elif self.mpw:
            if not self.no_ffn:
                self.feedforward = MPPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                             variational=self.variational,
                                                             factor_size=opt.mpw_factor_size)

            self.multihead = MPRelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
                                                         factor_size=opt.mpw_factor_size)
        else:
            if not self.no_ffn:
                self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                           variational=self.variational,
                                                           activation=opt.ffn_activation,
                                                           glu=opt.ffn_glu)

            self.multihead = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
                                                       learnable_pos=self.learnable_pos,
                                                       max_pos=opt.max_pos_length)

        if self.depthwise_conv:
            self.preprocess_conv = preprocessing(self.rezero, opt.model_size, 0.0,
                                                 multilingual=self.mln, sequence='n', n_languages=opt.n_languages)
            self.postprocess_conv = PrePostProcessing(opt.model_size, self.residual_dropout,
                                                      sequence='dz' if self.rezero else 'da',
                                                      variational=self.variational)
            # note: self.depthwise_conv switches from a bool flag to the module itself
            self.depthwise_conv = ConformerConvBlock(opt.model_size, opt.conv_kernel, bias=True)
        else:
            self.depthwise_conv = None

        if self.multilingual_adapter:
            from onmt.modules.multilingual_factorized.multilingual_adapters import MultilingualAdapter
            self.adapters = MultilingualAdapter(opt.model_size, opt.adapter_bottleneck_size,
                                                n_languages=opt.n_languages,
                                                dropout=opt.dropout)

    def forward(self, input, pos_emb, attn_mask, src_lang=None, factorize=False,
                incremental=False, incremental_cache=None, mems=None):
        """
        :param factorize:
        :param input: tensor [T x B x H]
        :param pos_emb: tensor [T x 1 x H]
        :param attn_mask: tensor [1 x T x B]
        :param src_lang: tensor [B] or None
        :param incremental: None
        :param incremental_cache:
        :param mems: None
        :return:
        """
        if incremental and incremental_cache is None:
            incremental_cache = dict()

        # stochastic depth: randomly skip the whole layer during training
        coin = True
        if self.training and self.death_rate > 0:
            coin = (torch.rand(1)[0].item() >= self.death_rate)

        if coin:
            if mems is not None and mems.size(0) > 0:
                mems = self.preprocess_attn(mems)
            else:
                mems = None

            if self.macaron:
                out = self.mcr_feedforward(self.preprocess_mcr_ffn(input), src_lang, factorize=factorize)

                # rescale to keep the expected residual magnitude under stochastic depth
                if self.training and self.death_rate > 0:
                    ffn_scale = self.ffn_scale / (1 - self.death_rate)
                else:
                    ffn_scale = self.ffn_scale

                input = self.postprocess_mcr_ffn(out * ffn_scale, input)

            """
            Self-attention block
            """
            query = self.preprocess_attn(input, factor=src_lang)

            if self.mfw or self.mpw:
                out, _ = self.multihead(query, pos_emb, src_lang, attn_mask, None, factorize=factorize,
                                        incremental=incremental, incremental_cache=incremental_cache, )
            else:
                out, _ = self.multihead(query, pos_emb, attn_mask, None, mems=mems,
                                        incremental=incremental, incremental_cache=incremental_cache)

            # rescaling before residual
            if self.training and self.death_rate > 0:
                out = out / (1 - self.death_rate)

            input = self.postprocess_attn(out, input)

            """
            Convolution block
            """
            if self.depthwise_conv:
                if attn_mask is not None and attn_mask.any():
                    conv_mask = attn_mask.squeeze(0).unsqueeze(-1)
                else:
                    conv_mask = None
                out = self.depthwise_conv(self.preprocess_conv(input, factor=src_lang), pad_mask=conv_mask)

                # rescaling before residual
                if self.training and self.death_rate > 0:
                    out = out / (1 - self.death_rate)

                input = self.postprocess_conv(out, input)

            """
            Feed forward layer
            """
            if not self.no_ffn:
                out = self.feedforward(self.preprocess_ffn(input, factor=src_lang), src_lang, factorize=factorize)

                # rescaling before residual
                if self.training and self.death_rate > 0:
                    ffn_scale = self.ffn_scale / (1 - self.death_rate)
                else:
                    ffn_scale = self.ffn_scale

                input = self.postprocess_ffn(out * ffn_scale, input)

            if self.multilingual_adapter:
                input = self.adapters(input, src_lang)

        if incremental:
            return input, incremental_cache

        return input
class RelativeTransformerDecoderLayer(nn.Module):
    """A single decoder layer with relative-position self-attention and
    optional encoder-decoder attention.

    Optional components, all driven by opt: macaron twin feed-forward blocks,
    multilingual factorized (mfw) or partitioned (mpw) weights, multilingual
    adapters, rezero residuals and stochastic layer depth (death_rate).

    Fix: stray dataset metadata ("| 24,391 | 52.026087 | 127 | py") fused onto
    the final return statement has been removed; the dead commented-out
    lfv_multilingual block has been dropped.
    """

    def __init__(self, opt, death_rate=0.0, lid_net=None):
        super(RelativeTransformerDecoderLayer, self).__init__()
        self.ignore_source = opt.ignore_source
        self.variational = opt.variational_dropout
        self.death_rate = death_rate
        self.mfw = opt.multilingual_factorized_weights
        self.mpw = opt.multilingual_partitioned_weights
        self.mln = opt.multilingual_layer_norm
        self.weight_drop = opt.weight_drop
        self.multilingual_adapter = opt.multilingual_adapter
        self.adapter_bottleneck_size = opt.adapter_bottleneck_size
        self.macaron = opt.macaron
        # macaron layers use two half-weighted FFN blocks
        self.ffn_scale = 0.5 if self.macaron else 1
        self.rezero = opt.rezero
        self.learnable_pos = opt.learnable_position_encoding
        # negative values mean "fall back to the global dropout rate"
        self.residual_dropout = opt.residual_dropout if opt.residual_dropout >= 0 else opt.dropout
        self.ffn_dropout = opt.ffn_dropout if opt.ffn_dropout >= 0 else opt.dropout

        self.preprocess_attn = preprocessing(self.rezero, opt.model_size, 0.0, sequence='n',
                                             multilingual=self.mln, n_languages=opt.n_languages)
        self.postprocess_attn = PrePostProcessing(opt.model_size, self.residual_dropout,
                                                  sequence='dz' if self.rezero else 'da',
                                                  variational=self.variational)

        if self.macaron:
            self.preprocess_mcr_ffn = preprocessing(self.rezero, opt.model_size, 0.0, sequence='n',
                                                    multilingual=self.mln, n_languages=opt.n_languages)
            self.postprocess_mcr_ffn = PrePostProcessing(opt.model_size, self.residual_dropout,
                                                         sequence='dz' if self.rezero else 'da',
                                                         variational=self.variational)

            if self.mfw:
                self.mcr_feedforward = MFWPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                                  variational=self.variational,
                                                                  n_languages=opt.n_languages, rank=opt.mfw_rank,
                                                                  use_multiplicative=opt.mfw_multiplicative,
                                                                  activation=opt.ffn_activation,
                                                                  glu=opt.ffn_glu)
            else:
                self.mcr_feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                               variational=self.variational,
                                                               activation=opt.ffn_activation,
                                                               glu=opt.ffn_glu)

        if not self.ignore_source:
            self.preprocess_src_attn = preprocessing(self.rezero, opt.model_size, 0.0, sequence='n',
                                                     multilingual=self.mln, n_languages=opt.n_languages)
            self.postprocess_src_attn = PrePostProcessing(opt.model_size, self.residual_dropout,
                                                          sequence='dz' if self.rezero else 'da',
                                                          variational=self.variational)

            if self.mfw:
                self.multihead_src = MFWEncdecMultiheadAttn(opt.n_heads, opt.model_size, opt.attn_dropout,
                                                            n_languages=opt.n_languages, rank=opt.mfw_rank,
                                                            use_multiplicative=opt.mfw_multiplicative,
                                                            weight_drop=self.weight_drop,
                                                            mfw_activation=opt.mfw_activation)
            elif self.mpw:
                self.multihead_src = MPEncdecMultiheadAttn(opt.n_heads, opt.model_size, opt.attn_dropout,
                                                           factor_size=opt.mpw_factor_size)
            else:
                self.multihead_src = EncdecMultiheadAttn(opt.n_heads, opt.model_size, opt.attn_dropout)

        self.preprocess_ffn = preprocessing(self.rezero, opt.model_size, 0.0, sequence='n',
                                            multilingual=self.mln, n_languages=opt.n_languages)
        self.postprocess_ffn = PrePostProcessing(opt.model_size, self.residual_dropout,
                                                 sequence='dz' if self.rezero else 'da',
                                                 variational=self.variational)

        d_head = opt.model_size // opt.n_heads

        # Pick the feed-forward / self-attention implementation matching the
        # multilingual weight scheme (factorized, partitioned, or plain).
        if self.mfw:
            self.feedforward = MFWPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                          variational=self.variational,
                                                          n_languages=opt.n_languages, rank=opt.mfw_rank,
                                                          use_multiplicative=opt.mfw_multiplicative,
                                                          weight_drop=self.weight_drop,
                                                          mfw_activation=opt.mfw_activation,
                                                          activation=opt.ffn_activation,
                                                          glu=opt.ffn_glu)

            self.multihead_tgt = MFWRelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
                                                              learnable_pos=self.learnable_pos,
                                                              max_pos=opt.max_pos_length,
                                                              n_languages=opt.n_languages, rank=opt.mfw_rank,
                                                              use_multiplicative=opt.mfw_multiplicative,
                                                              weight_drop=self.weight_drop,
                                                              mfw_activation=opt.mfw_activation)
        elif self.mpw:
            self.feedforward = MPPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                         variational=self.variational,
                                                         factor_size=opt.mpw_factor_size)

            self.multihead_tgt = MPRelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
                                                             factor_size=opt.mpw_factor_size)
        else:
            self.multihead_tgt = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
                                                           learnable_pos=self.learnable_pos,
                                                           max_pos=opt.max_pos_length)

            self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                       variational=self.variational,
                                                       activation=opt.ffn_activation,
                                                       glu=opt.ffn_glu)

        if self.multilingual_adapter:
            from onmt.modules.multilingual_factorized.multilingual_adapters import MultilingualAdapter
            self.adapters = MultilingualAdapter(opt.model_size, opt.adapter_bottleneck_size,
                                                n_languages=opt.n_languages,
                                                dropout=opt.dropout)

    def forward(self, input, context, pos_emb, mask_tgt=None, mask_src=None,
                src_lang=None, tgt_lang=None,
                incremental=False, incremental_cache=None, reuse_source=True, mems=None, factorize=False):
        """Run self-attention, optional encoder-decoder attention and the FFN,
        each as layernorm > op > dropout > residual.

        :return: (output, coverage, incremental_cache); coverage is None when
                 the source is ignored or the layer is stochastically skipped.
        """
        if incremental and incremental_cache is None:
            incremental_cache = dict()

        # stochastic depth: randomly skip the whole layer during training
        coin = True
        if self.training and self.death_rate > 0:
            coin = (torch.rand(1)[0].item() >= self.death_rate)

        if coin:
            # input and context should be time first ?
            if mems is not None and mems.size(0) > 0:
                mems = self.preprocess_attn(mems, factor=tgt_lang)
            else:
                mems = None

            if self.macaron:
                out = self.mcr_feedforward(self.preprocess_mcr_ffn(input), src_lang, factorize=factorize)

                # rescale to keep the expected residual magnitude under stochastic depth
                if self.training and self.death_rate > 0:
                    ffn_scale = self.ffn_scale / (1 - self.death_rate)
                else:
                    ffn_scale = self.ffn_scale

                input = self.postprocess_mcr_ffn(out * ffn_scale, input)

            # Self attention block
            query = self.preprocess_attn(input, factor=tgt_lang)

            if self.mfw or self.mpw:
                out, _ = self.multihead_tgt(query, pos_emb, tgt_lang, None, mask_tgt, factorize=factorize,
                                            incremental=incremental, incremental_cache=incremental_cache)
            else:
                out, _ = self.multihead_tgt(query, pos_emb, None, mask_tgt, mems=mems,
                                            incremental=incremental, incremental_cache=incremental_cache)

            # rescaling before residual
            if self.training and self.death_rate > 0:
                out = out / (1 - self.death_rate)

            input = self.postprocess_attn(out, input)

            # Context (encoder-decoder) attention block
            if not self.ignore_source:
                query = self.preprocess_src_attn(input, factor=tgt_lang)
                incremental_source = incremental and reuse_source

                if self.mfw or self.mpw:
                    # NOTE(review): tgt_lang is passed in both language slots here;
                    # confirm whether the first slot should be src_lang.
                    out, coverage = self.multihead_src(query, context, context, tgt_lang, tgt_lang, mask_src,
                                                       factorize=factorize,
                                                       incremental=incremental_source,
                                                       incremental_cache=incremental_cache)
                else:
                    out, coverage = self.multihead_src(query, context, context, mask_src,
                                                       incremental=incremental_source,
                                                       incremental_cache=incremental_cache)

                # rescaling before residual
                if self.training and self.death_rate > 0:
                    out = out / (1 - self.death_rate)

                input = self.postprocess_src_attn(out, input)
            else:
                coverage = None

            # Feed forward block
            out = self.feedforward(self.preprocess_ffn(input, factor=tgt_lang), tgt_lang, factorize=factorize)

            # rescaling before residual
            if self.training and self.death_rate > 0:
                ffn_scale = self.ffn_scale / (1 - self.death_rate)
            else:
                ffn_scale = self.ffn_scale

            input = self.postprocess_ffn(out * ffn_scale, input)

            if self.multilingual_adapter:
                input = self.adapters(input, tgt_lang)
        else:
            coverage = None

        return input, coverage, incremental_cache
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/mssm/mhs4.py | #!/usr/bin/env python3
from typing import Optional, List, Tuple, Union
import time
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.optim as optim
import torch.nn.functional as F
from torch import Tensor
# import pykeops
# import pykeops.torch
# from pykeops.torch import LazyTensor
from einops import rearrange, repeat
from opt_einsum import contract
from torch.cuda.amp import autocast
try:
from .ssm_kernel.ssm_kernel_coefficient import compute_kernel_coefficient
except ImportError:
from ssm_kernel.ssm_kernel_coefficient import compute_kernel_coefficient
@torch.no_grad()
def bilinear_discretization(
    A: torch.Tensor, B: torch.Tensor, C: torch.Tensor, D: torch.Tensor, T: torch.Tensor
):
    """
    Performs a bilinear (Tustin) transformation of the transition matrix A and
    the input matrix B, producing one discretization per head/channel step.

    Improvement: uses torch.einsum instead of the third-party
    opt_einsum.contract — these contractions are simple enough that there is
    no intermediate-order optimization to gain, and the dependency goes away.
    The docstring also now states that C and D are returned unchanged (the
    original claimed discretized dC/dD).

    Parameters:
        A: shape (Q, N, N) continuous-time transition matrices
        B: shape (Q, N) continuous-time input matrices
        C: shape (Q, C, H, N) output matrices (returned unchanged)
        D: shape (Q, C, H) skip matrices (returned unchanged)
        T: shape (Q, H) per-channel discretization time steps

    Returns:
        dA: shape (Q, H, N, N) discretized transition matrices
        dB: shape (Q, H, N) discretized input matrices
        C, D: passed through unchanged
    """
    # (dt/2) * A per head/channel: (Q, H, N, N); reused for both dA and dB
    factor = 0.50 * torch.einsum("qh,qnm->qhnm", T, A)

    # Identity of size N, broadcastable against (Q, H, N, N)
    identity = torch.eye(A.size(-1)).to(A).unsqueeze(0).unsqueeze(0)

    # Correction term (I - dt/2 A)^-1
    correction = torch.linalg.inv(identity - factor)

    # dA = (I - dt/2 A)^-1 (I + dt/2 A);  dB = (I - dt/2 A)^-1 dt B
    dA = torch.einsum("qhnm,qhmk->qhnk", correction, identity + factor)
    dB = torch.einsum("qhnm,qh,qm->qhn", correction, T, B)

    return dA, dB, C, D
def get_activation(act: str = "gelu"):
    """Map an activation name to a freshly constructed torch.nn module.

    Known names: "relu", "gelu", "swish" (SiLU), "glu"; anything else
    yields an Identity module.
    """
    known = {
        "relu": nn.ReLU,
        "gelu": nn.GELU,
        "swish": nn.SiLU,
        "glu": nn.GLU,
    }
    return known.get(act, nn.Identity)()
def gen_noisy_linear_weights(parameter_noise, weight):
    """Perturb linear-layer weights with row-normalized Gaussian noise.

    The noise direction is normalized per output channel (dim=1) and scaled
    by ``parameter_noise`` times the (detached) per-row weight norm, so the
    perturbation magnitude is relative to the weights themselves.

    InputArgs:
        parameter_noise: float, noise level in [0.0, 1.0]
        weight: Tensor, a weight matrix
    Return:
        Tensor of the same shape as ``weight`` with noise added.
    """
    raw_noise = torch.randn_like(weight).to(device=weight.device)
    unit_noise = raw_noise / torch.norm(raw_noise, dim=1, keepdim=True)
    # detach so the noise scale does not contribute gradients
    row_norms = torch.norm(weight, dim=1, keepdim=True).detach()
    return weight + (parameter_noise * row_norms) * unit_noise
class Linear(torch.nn.Linear):
    """torch.nn.Linear variant that can inject Gaussian weight noise while training.

    When ``parameter_noise`` is positive and the module is in training mode,
    the forward pass uses a noisy copy of the weights produced by
    ``gen_noisy_linear_weights``; otherwise it behaves exactly like
    ``torch.nn.Linear``.
    """

    def __init__(
        self,
        input_dim,
        output_dim,
        bias=True,
        parameter_noise: float = 0.0,
        device=None,
        dtype=None,
    ):
        # mirror torch.nn.Linear to set device and dtype
        super(Linear, self).__init__(
            in_features=input_dim,
            out_features=output_dim,
            bias=bias,
            device=device,
            dtype=dtype,
        )
        self.parameter_noise = parameter_noise

    def get_noisy_weight(self, weight):
        # Noise is applied only in training mode with a positive noise level.
        if self.training and self.parameter_noise > 0.0:
            return gen_noisy_linear_weights(self.parameter_noise, weight)
        return weight

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return F.linear(input, self.get_noisy_weight(self.weight), self.bias)
class TiedStateSpaceModel(nn.Module):
    """Multi-head state space model with a state tied across input channels.

    The continuous-time transition is A = diag(d) - p p^T (diagonal plus a
    rank-1 correction), discretized with the bilinear transform using a
    per-(head, channel) timestep.  The convolution kernel is evaluated in
    the frequency domain (Woodbury identity on the resolvent) and applied
    to the input via FFT convolution.
    """

    def __init__(
        self,
        input_dim: int,
        hidden_dim: int = 64,
        num_heads: int = 1,
        channels: int = 1,
        scale: float = 0.50,
        maxlen: int = 256,
        timestep_min: float = 0.010,
        timestep_max: float = 0.160,
        parameter_noise: float = 0.00,
        use_fast_kernel: bool = True,
        create_on_gpu=True
    ):
        super().__init__()
        # Store inputs (single-letter aliases follow the paper-style notation)
        self.input_dim = self.H = input_dim  # H = per-head input dimension
        self.hidden_dim = self.N = hidden_dim  # N = state size per head
        self.num_heads = self.Q = num_heads  # Q = number of heads
        self.channels = self.C = channels  # C = output channels
        self.parameter_noise = parameter_noise
        # Diagonal transition, parameterised in log space (see get_diagonal)
        self.diagonal = nn.Parameter(
            math.log(scale) + torch.randn(num_heads, hidden_dim)
        )
        if create_on_gpu:
            self.diagonal.data = self.diagonal.data.cuda()
        device = self.diagonal.device
        # Low-rank correction vector p.
        # BUGFIX: this used to be `nn.Parameter(...).to(device)`.  When
        # `device` is a GPU, Parameter.to() returns a plain *non-leaf* Tensor,
        # so the attribute was neither registered (absent from parameters()
        # and state_dict()) nor updated by the optimizer.  Moving .to(device)
        # inside nn.Parameter keeps it a registered leaf parameter.
        self.lowrank = nn.Parameter(torch.randn(num_heads, hidden_dim).to(device))
        # Discretization step per (head, channel), log-uniform in
        # [timestep_min, timestep_max].  Same BUGFIX as `lowrank` above.
        self.timestep = nn.Parameter(
            (
                torch.rand(num_heads, input_dim)
                * (math.log(timestep_max) - math.log(timestep_min))
                + math.log(timestep_min)
            ).to(device)
        )
        # Initialise remaining parameters
        self.register("input_matrix", (num_heads, hidden_dim), dim=1, device=device)
        self.register(
            "output_matrix",
            (num_heads, self.channels, input_dim, hidden_dim),
            dim=hidden_dim,
            device=device
        )
        self.register("skip_matrix", (num_heads, channels, input_dim), dim=1, device=device)
        # Precompute the FFT nodes (omega) and bilinear frequencies (z)
        self.setup(maxlen, dtype=torch.cfloat, device=self.diagonal.device)
        self.use_fast_kernel = use_fast_kernel

    def register(self, name, size, dim, lr=None, device=None):
        """Register a trainable parameter initialised uniform(-1, 1)/sqrt(dim),
        optionally tagging it with a per-parameter learning rate via `_optim`."""
        weight = torch.rand(*size).to(device)
        weight = (2 * weight - 1) / math.sqrt(dim)
        self.register_parameter(name, nn.Parameter(weight))
        # Optional per-parameter optimizer overrides
        optim = {}
        if lr is not None:
            optim["lr"] = lr
        if len(optim) > 0:
            setattr(getattr(self, name), "_optim", optim)  # noqa

    @torch.no_grad()
    def get_correction_factor(self, double=False):
        """Return I -/+ dA^maxlen, used to fold the finite-length truncation of
        the kernel into the output matrix (see setup / setup_linear)."""
        # Use the clean (noise-free) parameters for this transformation
        d = self.get_diagonal()  # (Q, N)
        p = self.get_lowrank()  # (Q, N)
        t = self.get_timestep()  # (Q, H)
        identity = torch.eye(self.hidden_dim).to(d).unsqueeze(0).unsqueeze(0)
        # Continuous matrix scaled by t/2: (Q, H, N, N)
        A = 0.50 * contract("qh,qnm->qhnm", t, self.get_transition(d, p))
        # Bilinear discretization, computed naively
        dA = torch.linalg.solve(identity - A, identity + A)
        # When doubling: (I - dA^L)(I + dA^L) = I - dA^(2L), so the plus sign
        # upgrades a length-L correction into a length-2L one.
        if double:
            return identity + torch.matrix_power(dA, self.maxlen)
        return identity - torch.matrix_power(dA, self.maxlen)

    @torch.no_grad()
    def setup(self, maxlen, dtype, device, double=False):
        """Cache the FFT nodes and bilinear-transformed frequencies for the
        supported length; fold the truncation correction into the output
        matrix.  Must be called every time the internal length changes."""
        # Update internal length
        self.maxlen = maxlen
        # Get the correction matrix (H, N, N)
        correction = self.get_correction_factor(double)
        # Correct for the length by modifying the output matrix per channel.
        # Do not call get_output_matrix, to avoid noise injection.
        weight = self.output_matrix.data
        weight = contract("qchn,qhnk->qchk", weight, correction).contiguous()
        self.output_matrix.data = weight
        # Double the supported length when a longer sequence was encountered
        if double:
            self.maxlen *= 2
        self.setup_omega_z(dtype, device)

    @torch.no_grad()
    def setup_omega_z(self, dtype, device):
        """Cache the roots of unity (omega) and their bilinear transform (z)."""
        self.L = self.maxlen
        # Roots of unity on the unit circle
        omega = torch.tensor(
            np.exp(-2j * np.pi / self.maxlen), dtype=dtype, device=device
        )
        omega = omega ** torch.arange(self.maxlen, device=device)
        # Bilinear transformation of the FFT nodes
        z = 2 * (1 - omega) / (1 + omega)
        # Store as real views (complex buffers are awkward to serialise/cast)
        self.register_buffer("omega", torch.view_as_real(omega))
        self.register_buffer("z", torch.view_as_real(z))

    @torch.no_grad()
    def setup_linear(self):
        """Precompute the factors needed to run the recurrent (step) form
        efficiently; undoes the length correction on the output matrix."""
        # Undo the truncation correction applied by setup()
        correction = self.get_correction_factor()
        correction = torch.linalg.inv(correction)
        # Do not call get_output_matrix, to avoid noise injection
        weight = self.output_matrix.data  # (..., HN) -> (H, ..., N)
        weight = contract("qchn,qhnk->qchk", weight, correction).contiguous()
        self.output_matrix.data = weight
        # Get all quantities
        d = self.get_diagonal()  # (Q, N)
        p = self.get_lowrank()  # (Q, N)
        t = self.get_timestep()  # (Q, H)
        # For the A0 matrix
        d0 = 2 / t.unsqueeze(-1) + d.unsqueeze(-2)
        f0 = repeat(p, "q n -> q h n", h=self.input_dim)
        s0 = 1.0
        # For the A1 matrix
        d1 = 1 / (2 / t.unsqueeze(-1) - d.unsqueeze(-2))
        f1 = d1 * p.unsqueeze(-2)
        s1 = 1 / (1 + contract("qhn,qhn,qhn->qh", f0, d1, f0)).unsqueeze(-1)
        # Compute the discretized states
        dA, dB, dC, dD = bilinear_discretization(
            self.get_transition(),
            self.input_matrix,
            self.output_matrix,
            self.skip_matrix,
            self.get_timestep(),
        )
        self.linear_params = {
            "d0": d0,  # (Q, H, N)
            "d1": d1,  # (Q, H, N)
            "f0": f0,  # (Q, H, N)
            "f1": f1,  # (Q, H, N)
            "s0": s0,  # (1)
            "s1": s1,  # (Q, H, 1)
            "dA": dA,  # (Q, H, N, N)
            "dB": dB,  # (Q, H, N)
            "dC": dC,  # (Q, C, H, N)
            "dD": dD,  # (Q, C, H)
        }

    def get_noisy_weight(self, weight):
        """Return a perturbed weight during noisy training, else the weight."""
        if self.parameter_noise > 0.0 and self.training:
            return gen_noisy_linear_weights(self.parameter_noise, weight)
        return weight

    def get_diagonal(self):
        # Log parameterisation keeps the real part strictly negative (stable)
        return -torch.exp(self.diagonal)

    def get_lowrank(self):
        return self.lowrank

    def get_transition(self, d=None, p=None):
        """Continuous transition A = diag(d) - p p^T, shape (Q, N, N)."""
        d = d if d is not None else self.get_diagonal()
        p = p if p is not None else self.get_lowrank()
        return torch.diag_embed(d) - contract("qm,qn->qmn", p, p)

    def get_timestep(self):
        # Log parameterisation keeps the timestep strictly positive
        return torch.exp(self.timestep)

    def get_input_matrix(self):
        return self.get_noisy_weight(self.input_matrix)  # (Q, H)

    def get_output_matrix(self):
        return self.get_noisy_weight(self.output_matrix)  # (Q, C, H, N)

    def get_skip_matrix(self):
        return self.get_noisy_weight(self.skip_matrix)  # (Q, C, H)

    def get_dwoodbury(self, z, d, invt):
        """Diagonal resolvent term 1 / (z/t - d), shape (Q, L, H, N)."""
        # Get the bilinear transformation
        z = contract("l,qh->qlh", torch.view_as_complex(z), invt)
        # Compute the term once and reuse it (Q, L, H, N)
        return 1 / (z.unsqueeze(-1) - d.unsqueeze(-2).unsqueeze(-2))

    def compute_slow(self, z, d, t, b, c):
        """Reference (einsum) computation of the kernel coefficients."""
        # z is forced to be fp32; promoting the rest prevents fp16 underflow,
        # particularly on t
        if t.dtype == torch.float16:
            t = t.to(z.dtype)
            b = b.to(z.dtype)
            c = c.to(z.dtype)
            d = d.to(z.dtype)
        # Memory-heavy denominator, shared across channel pairs
        r = self.get_dwoodbury(z, d, 1 / t)  # (Q, L, H, N)
        # Compute kernel coeffs
        kernelcc = contract("qihn,qlhn,qchn->qiclh", b.to(r.dtype), r, c)
        return kernelcc

    def get_kernel(self):
        """Materialise the length-L convolution kernel, shape (L, Q, C, H)."""
        # Get the parameters which are transformed
        d = self.get_diagonal()  # (Q, N)
        t = self.get_timestep()  # (Q, H)
        # Get the lowrank contribution and input matrix
        p = self.get_lowrank()  # (Q, N)
        b = self.get_input_matrix()  # (Q, H)
        c = self.get_output_matrix()  # (Q, C, H, N)
        # Broadcast since the states are tied across input channels
        b = repeat(b, "q n -> q 1 h n", h=self.input_dim)  # (Q, 1, H, N)
        p = repeat(p, "q n -> q 1 h n", h=self.input_dim)  # (Q, 1, H, N)
        # Stack the rank-1 terms so one batched call covers all contractions
        b = torch.cat([b, p], dim=1)  # (Q, 2, H, N)
        c = torch.cat([c, p], dim=1)  # (Q, C + 1, H, N)
        kernelcc = compute_kernel_coefficient(self.z, d, t, b, c, fast=self.use_fast_kernel)
        # Woodbury identity assuming low rank of 1:
        # (Q, 2, C + 1, L, H) -> (Q, 1, C, L, H)
        unit = 2 / (1 + torch.view_as_complex(self.omega))
        kernel = kernelcc[:, :-1, :-1] - kernelcc[:, -1:, :-1] * kernelcc[
            :, :-1, -1:
        ] / (1 + kernelcc[:, -1:, -1:])
        kernel = kernel.squeeze(1)  # (Q, C, L, H)
        kernel = contract("l,qclh->lqch", unit, kernel)
        kernel = torch.fft.irfft(kernel, n=kernel.size(0), dim=0)
        return kernel.float()

    # NOTE(review): masking of padded positions is left to the caller.
    def forward(self, u: torch.Tensor):
        """Apply the SSM as an FFT convolution plus skip connection.

        Args:
            u: input of shape (L, B, Q, H).

        Returns:
            Tensor of shape (L, B, Q, C, H).
        """
        length = u.size(0)
        # Double the cached kernel length until it covers this sequence
        while length > self.maxlen:
            self.setup(
                self.maxlen,
                dtype=torch.cfloat,
                device=self.diagonal.device,
                double=True,
            )
        # Triggered once at the start of fp16 training: .half() downcasts the
        # buffers, so rebuild them in complex float
        if self.z.dtype == torch.float16:
            self.setup_omega_z(dtype=torch.cfloat, device=self.diagonal.device)
        # For FP16 conversion
        fp16 = u.dtype == torch.float16
        # Perform state space modelling (L, Q, C, H); kernel is kept in fp32
        k = self.get_kernel()[:length]
        # Zero-padded (2L) FFT convolution, done in fp32 for stability
        k_f = torch.fft.rfft(k.float(), n=2 * length, dim=0)
        uu = u.to(torch.float32) if fp16 else u
        u_f = torch.fft.rfft(uu, n=2 * length, dim=0)
        x_f = contract("lqch,lbqh->lbqch", k_f, u_f)
        # Back to the time domain; keep only the causal (first L) part
        x = torch.fft.irfft(x_f, n=2 * length, dim=0)[:length]
        x = x.to(torch.float16) if fp16 else x
        # Skip connection
        return x + contract("qch,lbqh->lbqch", self.get_skip_matrix(), u)
class MHS4(nn.Module):
    """Multi-Head S4 block: per-head input projection -> TiedStateSpaceModel
    -> activation/dropout -> optional output linear.

    Input and output are shaped (L, B, H); heads are created by projecting
    the input into ``num_heads`` streams of size ``projection_dim``.
    """

    def __init__(
        self,
        input_dim: int,
        output_dim: Optional[int] = None,
        projection_dim: Optional[int] = None,
        hidden_dim: int = 64,
        num_heads: int = 1,
        activation: Optional[str] = "gelu",
        channels: int = 1,
        rank: int = 1,
        scale: float = 0.50,
        maxlen: int = 256,
        timestep_min: float = 0.010,
        timestep_max: float = 0.160,
        dropout: float = 0.00,
        use_final_linear: bool = True,
        parameter_noise: float = 0.00,
        use_fast_kernel: bool = True,
        create_on_gpu: bool = True
    ):
        super().__init__()
        # Only a rank of 1 is supported
        assert rank == 1
        # Store inputs
        self.input_dim = input_dim
        self.output_dim = output_dim or input_dim
        self.projection_dim = projection_dim or input_dim // num_heads
        self.hidden_dim = hidden_dim
        self.num_heads = num_heads
        self.channels = channels
        self.parameter_noise = parameter_noise
        # GLU activation halves its input, so it requires double the channels
        glu = activation == "glu"
        # Increase number of channels for glu
        self.channels *= 2 if glu else 1
        # Input is divisible by number of heads
        assert self.input_dim % self.num_heads == 0
        # Projection layer (only needed when there is more than one head)
        self.projweight, self.projbias = (
            self.init_linear(
                sizew=(self.num_heads, self.projection_dim, input_dim),
                sizeb=(self.num_heads, self.projection_dim),
            )
            if self.num_heads > 1
            else (None, None)
        )
        # SSM Layer
        self.ssm = TiedStateSpaceModel(
            input_dim=self.projection_dim,
            hidden_dim=hidden_dim,
            num_heads=num_heads,
            channels=self.channels,
            scale=scale,
            maxlen=maxlen,
            timestep_min=timestep_min,
            timestep_max=timestep_max,
            parameter_noise=parameter_noise,
            use_fast_kernel=use_fast_kernel,
            create_on_gpu=create_on_gpu
        )
        # Dropout and activation following the ssm
        self.activation = get_activation(activation)
        self.dropout = nn.Dropout(dropout) if dropout > 0.0 else nn.Identity()
        # Final linear layer (identity when disabled)
        self.out = (
            Linear(
                input_dim=self.projection_dim * self.num_heads,
                output_dim=self.output_dim,
                parameter_noise=parameter_noise,
            )
            if use_final_linear
            else nn.Identity()
        )

    def init_linear(self, sizew, sizeb):
        """Initialise a batched (per-head) linear weight/bias pair, mirroring
        torch.nn.Linear's default init (kaiming weight, fan-in bound bias)."""
        # Weight matrix
        weight = nn.Parameter(torch.empty(sizew))
        init.kaiming_uniform_(weight, a=math.sqrt(5))
        # Bias vector
        bias = nn.Parameter(torch.empty(sizeb))
        fan_in, _ = init._calculate_fan_in_and_fan_out(weight)
        bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
        init.uniform_(bias, -bound, bound)
        return weight, bias

    def get_noisy_weight(self, weight):
        # NOTE(review): unlike Linear.get_noisy_weight this variant does not
        # check `self.training`, so noise is also injected at eval time when
        # parameter_noise > 0 — confirm whether that is intentional.
        if self.parameter_noise > 0.0:
            return gen_noisy_linear_weights(self.parameter_noise, weight)
        return weight

    @torch.no_grad()
    def setup(self, maxlen, dtype, device, double=False):
        # Delegate kernel-length (re)configuration to the SSM
        self.ssm.setup(maxlen, dtype, device, double=double)

    @torch.no_grad()
    def setup_linear(self):
        # Prepare the SSM's recurrent (step-by-step) parameters
        self.ssm.setup_linear()

    def projection_linear(self, x):
        """Project the input of shape (L, B, H) to per-head streams (L, B, Q, K)."""
        if self.projweight is None:
            return x.unsqueeze(-2)
        # Noisy training
        projweight = self.get_noisy_weight(self.projweight)
        projbias = self.get_noisy_weight(self.projbias)
        l, b, n = x.size(0), x.size(1), x.size(2)
        q, k = projweight.size(0), projweight.size(1)
        # this op is cast to fp16 (under autocast)
        out1 = torch.mm(x.view(l * b, n), projweight.view(q * k, n).transpose(0, 1).contiguous())
        # this op always outputs float32; in-place add of the (cast) bias
        out = out1.view(l, b, q, k).add_(projbias.type_as(out1))
        return out
        # return contract("qkn,lbn->lbqk", projweight, x) + projbias

    def forward(self, u: torch.Tensor):
        # Assumes the input is of shape (L, B, H)
        u = self.projection_linear(u)
        u = self.ssm(u)
        # Merge heads and channels back into one feature axis
        u = rearrange(u, "l b q c h -> l b (q c h)")
        u = self.dropout(self.activation(u))
        u = self.out(u)
        return u
def build_stacked_mh_s4(
    num_layers: int = 1,
    only_activate_last: bool = False,
    input_dim: int = 512,
    intermediate_dim: int = 512,
    output_dim: Optional[int] = None,
    hidden_dim: int = 32,
    num_heads: int = 1,
    activation: str = "gelu",
    channels: int = 1,
    rank: int = 1,
    scale: float = 0.50,
    maxlen: int = 256,
    timestep_min: float = 0.010,
    timestep_max: float = 0.160,
    dropout: float = 0.10,
    remove_final_linear: bool = False,
    parameter_noise: float = 0.00,
    use_fast_kernel: bool = True,
    create_on_gpu = True
):
    """Build an ``nn.Sequential`` stack of ``num_layers`` MHS4 layers.

    The first layer maps ``input_dim`` to ``intermediate_dim`` (or straight to
    ``output_dim`` for a single-layer stack); intermediate layers keep
    ``intermediate_dim``; the final layer maps to ``output_dim``.  When
    ``only_activate_last`` is set, only the last layer gets an activation.
    """
    # Build all layers sequentially
    layers = []
    # Decide on output dimension
    output_dim = output_dim or input_dim
    # First layer is activated when single layer, or when stacking activates all
    use_activation = num_layers == 1 or not only_activate_last
    # Do not use a final linear layer with multiple heads in stacked mode since
    # there is a following projection; this also reduces the parameter count
    use_final_linear = (num_heads == 1) or (num_layers == 1)
    layers.append(
        MHS4(
            input_dim=input_dim,
            output_dim=intermediate_dim if num_layers > 1 else output_dim,
            hidden_dim=hidden_dim,
            num_heads=num_heads,
            activation=activation if use_activation else None,
            channels=channels,
            rank=rank,
            scale=scale,
            maxlen=maxlen,
            timestep_min=timestep_min,
            timestep_max=timestep_max,
            dropout=dropout,
            use_final_linear=use_final_linear,
            parameter_noise=parameter_noise,
            use_fast_kernel=use_fast_kernel,
            create_on_gpu=create_on_gpu
        )
    )
    # Intermediate layers
    # Ensure each head dimension is consistent
    assert intermediate_dim % num_heads == 0
    for i in range(num_layers - 2):
        layers.append(
            MHS4(
                input_dim=input_dim
                if (not use_final_linear and i == 0)
                else intermediate_dim,
                output_dim=intermediate_dim,
                projection_dim=intermediate_dim // num_heads,
                hidden_dim=hidden_dim,
                num_heads=num_heads,
                activation=activation if use_activation else None,
                channels=channels,
                rank=rank,
                scale=scale,
                maxlen=maxlen,
                timestep_min=timestep_min,
                timestep_max=timestep_max,
                dropout=dropout,
                use_final_linear=use_final_linear,
                parameter_noise=parameter_noise,
                use_fast_kernel=use_fast_kernel,
                # BUGFIX: create_on_gpu was only forwarded to the first layer,
                # so requesting CPU creation was silently ignored here.
                create_on_gpu=create_on_gpu,
            )
        )
    # Final layer, requires larger projection layers for higher intermediate projections
    # Ensure that the output is divisible
    assert output_dim % num_heads == 0
    if num_layers > 1:
        layers.append(
            MHS4(
                input_dim=input_dim
                if (not use_final_linear and num_layers == 2)
                else intermediate_dim,
                output_dim=output_dim,
                projection_dim=intermediate_dim // num_heads,
                hidden_dim=hidden_dim,
                num_heads=num_heads,
                activation=activation,
                channels=channels,
                rank=rank,
                scale=scale,
                maxlen=maxlen,
                timestep_min=timestep_min,
                timestep_max=timestep_max,
                dropout=dropout,
                use_final_linear=True,
                parameter_noise=parameter_noise,
                use_fast_kernel=use_fast_kernel,
                # BUGFIX: see the intermediate-layer note above.
                create_on_gpu=create_on_gpu,
            )
        )
    # Remove the last layer's linear projection if requested
    if remove_final_linear:
        assert (
            intermediate_dim == input_dim
        ), "Removing the final linear layer is only allowed when the intermediate dimension matches the input"
        layers[-1].out = nn.Identity()
    return nn.Sequential(*layers)
class BasicBlock(nn.Module):
    """Pre-norm residual wrapper: x -> x + Dropout(main_module(LayerNorm(x))).

    Operates on inputs shaped (T, B, D); ``lengths`` is passed through
    untouched and an empty state list is returned.
    """

    def __init__(
        self,
        input_dim,
        main_module,
        dropout=0.0,
    ):
        super().__init__()
        self.ln = nn.LayerNorm(input_dim)
        self.dp = nn.Dropout(dropout) if dropout > 0.0 else nn.Identity()
        self.main_module = main_module

    def forward(
        self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None
    ) -> Tuple[Tensor, Tensor, List[Tensor]]:
        # input -> LayerNorm -> main_module -> dropout -> residual(+input)
        residual = input
        transformed = self.dp(self.main_module(self.ln(input)))
        return transformed + residual, lengths, []
class BidirectionalBasicBlock(nn.Module):
    """Bidirectional wrapper around a pair of sequence modules.

    The (T, B, D) input is processed forward by ``forward_module`` and, after
    per-sequence time reversal, by ``backward_module``; both results are
    concatenated on the feature axis and projected back to ``input_dim``.
    With ``residual_norm`` a pre-LayerNorm / dropout / residual scheme wraps
    the whole block.
    """

    def __init__(
        self,
        input_dim,
        forward_module,
        backward_module,
        dropout=0.0,
        parameter_noise=0.0,
        residual_norm=True,
    ):
        super().__init__()
        if residual_norm:
            self.ln = nn.LayerNorm(input_dim)
            self.dp = nn.Dropout(dropout) if dropout > 0.0 else nn.Identity()
        else:
            self.ln = self.dp = None
        self.forward_module = forward_module
        self.backward_module = backward_module
        # Reduces the concatenated forward/backward features back to input_dim
        self.linear = Linear(
            input_dim=input_dim * 2,
            output_dim=input_dim,
            parameter_noise=parameter_noise,
        )

    def reverse_padded_sequence(self, input, lengths):
        # Flip each sequence in time up to its own length; positions beyond
        # lengths[i] are left as zeros.  Input is time-first: (T, B, D).
        reversed_out = torch.zeros_like(input)
        for idx, seq_len in enumerate(lengths):
            reversed_out[:seq_len, idx] = input[:seq_len, idx].flip(0)
        return reversed_out

    def forward(
        self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None
    ) -> Tuple[Tensor, Tensor, List[Tensor]]:
        # Assume the input takes shape (T, B, D)
        normed = input if self.ln is None else self.ln(input)
        flipped = self.reverse_padded_sequence(normed, lengths)
        # Run the forward/backward modules on the two time directions
        fwd = self.forward_module(normed)
        bwd = self.reverse_padded_sequence(self.backward_module(flipped), lengths)
        # Concatenate and reduce back to input_dim
        merged = torch.cat([fwd, bwd], dim=-1)
        if self.ln is not None:
            return self.dp(self.linear(merged)) + input, lengths, []
        return self.linear(merged), lengths, []
# For backward compatibility
class mySequentialv2(nn.ModuleList):
    """ModuleList whose forward threads the (input, lengths, state) triple
    through every child module in order."""

    def forward(
        self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None
    ) -> Tuple[Tensor, Tensor, List[Tensor]]:
        out, out_lengths, out_state = input, lengths, state
        for layer in self:
            out, out_lengths, out_state = layer(out, out_lengths, out_state)
        return out, out_lengths, out_state
class MHBiS4Layer(nn.Module):
    """One encoder layer: bidirectional stacked-MHS4 block(s) optionally
    followed by a pre-norm residual feed-forward block.

    With ``s4_only`` the FFN and the residual/LayerNorm wrapping of the SSM
    block are disabled.
    """

    def __init__(
        self,
        input_dim: int = 512,
        mssm_num_modules: int = 1,  # number of bidirectional SSM blocks stacked in this layer
        mssm_num_stacks: int = 2,
        mssm_only_activate_last: bool = False,
        mssm_intermediate_dim: int = 512,
        mssm_hidden_dim: int = 32,
        mssm_num_heads: int = 1,
        mssm_activation: str = "gelu",
        mssm_rank: int = 1,
        mssm_scale: float = 0.50,
        mssm_maxlen: int = 256,
        mssm_timestep_min: float = 0.010,
        mssm_timestep_max: float = 0.160,
        mssm_dropout: float = 0.10,
        mssm_remove_final_linear: bool = False,
        ffn_activation: str = "gelu",
        ffn_dim: int = 2048,
        ffn_dropout: float = 0.10,
        parameter_noise: float = 0.00,
        use_fast_kernel: bool = True,
        s4_only=False,
        create_on_gpu=True
    ):
        super().__init__()
        # Forward-direction SSM stacks (built before the backward ones; this
        # order also fixes the RNG stream used for parameter initialisation)
        forward_ssm_modules = [
            build_stacked_mh_s4(
                num_layers=mssm_num_stacks,
                only_activate_last=mssm_only_activate_last,
                input_dim=input_dim,
                intermediate_dim=mssm_intermediate_dim,
                output_dim=input_dim,
                hidden_dim=mssm_hidden_dim,
                num_heads=mssm_num_heads,
                activation=mssm_activation,
                rank=mssm_rank,
                scale=mssm_scale,
                maxlen=mssm_maxlen,
                timestep_min=mssm_timestep_min,
                timestep_max=mssm_timestep_max,
                dropout=mssm_dropout,
                remove_final_linear=mssm_remove_final_linear,
                parameter_noise=parameter_noise,
                use_fast_kernel=use_fast_kernel,
                create_on_gpu=create_on_gpu
            )
            for _ in range(mssm_num_modules)
        ]
        # Backward-direction SSM stacks (identical configuration)
        backward_ssm_modules = [
            build_stacked_mh_s4(
                num_layers=mssm_num_stacks,
                only_activate_last=mssm_only_activate_last,
                input_dim=input_dim,
                intermediate_dim=mssm_intermediate_dim,
                output_dim=input_dim,
                hidden_dim=mssm_hidden_dim,
                num_heads=mssm_num_heads,
                activation=mssm_activation,
                rank=mssm_rank,
                scale=mssm_scale,
                maxlen=mssm_maxlen,
                timestep_min=mssm_timestep_min,
                timestep_max=mssm_timestep_max,
                dropout=mssm_dropout,
                remove_final_linear=mssm_remove_final_linear,
                parameter_noise=parameter_noise,
                use_fast_kernel=use_fast_kernel,
                create_on_gpu=create_on_gpu
            )
            for _ in range(mssm_num_modules)
        ]
        # Pair each forward stack with a backward stack
        self.ssm_block = mySequentialv2(
            [
                BidirectionalBasicBlock(
                    input_dim=input_dim,
                    forward_module=fmodule,
                    backward_module=bmodule,
                    dropout=mssm_dropout,
                    parameter_noise=parameter_noise,
                    residual_norm=not s4_only
                )
                for fmodule, bmodule in zip(forward_ssm_modules, backward_ssm_modules)
            ]
        )
        if not s4_only:
            # Standard two-layer FFN; GLU halves its input, hence the doubled
            # hidden width when ffn_activation == "glu"
            ffn_module = nn.Sequential(
                Linear(
                    input_dim=input_dim,
                    output_dim=ffn_dim * (2 if ffn_activation == "glu" else 1),
                    parameter_noise=parameter_noise,
                ),
                get_activation(ffn_activation),
                nn.Dropout(ffn_dropout) if ffn_dropout > 0.0 else nn.Identity(),
                Linear(
                    input_dim=ffn_dim,
                    output_dim=input_dim,
                    parameter_noise=parameter_noise,
                ),
            )
            self.ffn_block = BasicBlock(
                input_dim=input_dim, main_module=ffn_module, dropout=ffn_dropout
            )
        else:
            self.ffn_block = None

    def forward(
        self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None
    ) -> Tuple[Tensor, Tensor, List[Tensor]]:
        # (T, B, D) in, (T, B, D) out; lengths pass through unchanged
        output = input
        output, _, _ = self.ssm_block(output, lengths, state)
        if self.ffn_block is not None:
            output, _, _ = self.ffn_block(output, lengths, state)
        return output, lengths, []
class MHBiS4EncoderLayer(nn.Module):
    """Encoder-layer adapter: builds an MHBiS4Layer from a fairseq-style
    config object and exposes the usual (x, padding-mask) interface."""

    def __init__(self, cfg, s4_only=False, create_on_gpu=True):
        super().__init__()
        self.module = self.build_module(cfg, s4_only=s4_only, create_on_gpu=create_on_gpu)

    def build_module(self, cfg, s4_only=False, create_on_gpu=True):
        """Translate the encoder config into MHBiS4Layer keyword arguments."""
        return MHBiS4Layer(
            input_dim=cfg.encoder_embed_dim,
            mssm_num_modules=1,
            mssm_num_stacks=cfg.encoder_mssm_num_stacks,
            mssm_only_activate_last=False,
            mssm_intermediate_dim=cfg.encoder_embed_dim,
            mssm_hidden_dim=cfg.encoder_mssm_hidden_dim,
            mssm_num_heads=cfg.encoder_mssm_num_heads,
            mssm_activation=cfg.encoder_mssm_activation,
            mssm_rank=1,
            mssm_scale=cfg.encoder_mssm_scale,
            mssm_maxlen=cfg.encoder_mssm_maxlen,
            mssm_timestep_min=cfg.encoder_mssm_timestep_min,
            mssm_timestep_max=cfg.encoder_mssm_timestep_max,
            mssm_dropout=cfg.dropout,
            mssm_remove_final_linear=True,
            ffn_activation=cfg.activation_fn,
            ffn_dim=cfg.encoder_ffn_embed_dim,
            ffn_dropout=cfg.relu_dropout or 0,
            parameter_noise=0.00,
            use_fast_kernel=True,
            s4_only=s4_only,
            create_on_gpu=create_on_gpu,
        )

    @torch.no_grad()
    def infer_lengths(self, batch, maxlen, encoder_padding_mask: Optional[Tensor]):
        """Derive per-sequence lengths by counting non-padding (!= 1) entries."""
        return encoder_padding_mask.ne(1).sum(-1).to(int)

    def forward(
        self,
        x,
        encoder_padding_mask: Optional[Tensor],
        attn_mask: Optional[Tensor] = None,
        attn_bias: Optional[Tensor] = None  # relative position encoding
    ):
        """
        Args:
            x (Tensor): input of shape `(seq_len, batch, embed_dim)`.
            encoder_padding_mask (ByteTensor): `(batch, seq_len)` mask where
                padding positions are marked with ``1``; ``None`` means no
                padding.
            attn_mask / attn_bias: accepted for interface compatibility but
                not used by the SSM layer.

        Returns:
            Encoded output of shape `(seq_len, batch, embed_dim)`.
        """
        seq_len, bsz = x.size(0), x.size(1)
        if encoder_padding_mask is None:
            # No padding: treat every position as valid
            encoder_padding_mask = x.new_zeros(bsz, seq_len)
        lengths = self.infer_lengths(
            batch=bsz,
            maxlen=seq_len,
            encoder_padding_mask=encoder_padding_mask,
        )
        out, _, _ = self.module(x, lengths)
        return out

    def upgrade_state_dict_named(self, state_dict, name):
        """Checkpoint-compat hook (layer-norm renaming); intentionally a no-op
        for this layer."""
        pass
if __name__ == "__main__":
import json
from types import SimpleNamespace
from random import randint
def json_to_namespace(json_file):
with open(json_file) as f:
x = json.load(f, object_hook=lambda d: SimpleNamespace(**d))
for name in x.__dict__:
if x.__dict__[name] in ['False', 'True']:
x.__dict__[name] = (x.__dict__[name] == 'True')
return x
cfg = json_to_namespace("mssm_config.json")
s4_layer = MHBiS4EncoderLayer(cfg, s4_only=True, create_on_gpu=True)
print(s4_layer)
s4_layer = s4_layer.cuda()
t = 512
b = 16
h = 1024
x = torch.randn(*(t, b, h)).cuda()
mask = torch.ones(*(b, t), dtype=torch.bool)
for i in range(b):
l = randint(t//2, t)
mask[i][0:l].fill_(0)
x = x.half()
print(x.size(), x.type())
with autocast(enabled=True, dtype=torch.float16):
output = s4_layer(x, mask)
print(output.size())
print(output.sum())
n_params = 0
for param in s4_layer.parameters():
n_params += param.numel()
print(n_params)
print(n_params * 24 ) | 37,369 | 32.515695 | 114 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/mssm/fft_convolution.py | import torch
| 14 | 4 | 12 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/mssm/ssm_kernel/ssm_kernel_coefficient.py | #!/usr/bin/env python3
import torch
from opt_einsum import contract
import os
import pathlib
import ssm_kernel_coefficient_cuda
# from torch.utils.cpp_extension import load
# ssm_kernel_coefficient_binding = load(
# name="ssm_kernel_coefficient_binding",
# sources=[
# os.path.join(
# pathlib.Path(__file__).parent.resolve(),
# "ssm_kernel_coefficient_binding_cuda.cu"
# )
# ],
# verbose = True
# )
# pyre-ignore
# from ssm_kernel_coefficient_binding import (
# kernel_coefficient_backward_double,
# kernel_coefficient_backward_float,
# kernel_coefficient_forward_double,
# kernel_coefficient_forward_float,
# )
def compute_kernel_coefficient(z, d, t, b, c, fast=False):
    """Dispatch the kernel-coefficient computation.

    The fused CUDA path is used only when requested AND ``z`` lives on the
    GPU; otherwise the einsum-based reference implementation is used.
    """
    use_cuda_path = fast and z.is_cuda
    if use_cuda_path:
        return compute_fast(z, d, t, b, c)
    return compute_slow(z, d, t, b, c)
def get_dwoodbury(z, d, invt):
    """Diagonal resolvent term 1 / (z/t - d), shape (Q, L, H, N).

    ``z`` is the real view of the complex frequency grid; ``invt`` holds the
    reciprocal timesteps (Q, H); ``d`` the diagonal transition (Q, N).
    """
    # Scale the bilinear-transformed frequencies by 1/t per head
    scaled = contract("l,qh->qlh", torch.view_as_complex(z), invt)
    # Broadcast against the diagonal and invert once, reusing the result
    return (scaled.unsqueeze(-1) - d.unsqueeze(-2).unsqueeze(-2)).reciprocal()
def compute_slow(z, d, t, b, c):
    """Reference (einsum) computation of the kernel coefficients.

    ``z`` is kept in fp32; if the other inputs arrive as fp16 they are
    promoted first, which avoids fp16 underflow (particularly on ``t``).
    Returns coefficients of shape (Q, I, C, L, H).
    """
    if t.dtype == torch.float16:
        t, b, c, d = (x.to(z.dtype) for x in (t, b, c, d))
    # Memory-heavy diagonal denominator, shared across all channel pairs
    resolvent = get_dwoodbury(z, d, 1 / t)  # (Q, L, H, N)
    # Contract the input/output maps against the resolvent
    return contract("qihn,qlhn,qchn->qiclh", b.to(resolvent.dtype), resolvent, c)
def compute_fast(z, d, t, b, c):
    """CUDA-accelerated kernel-coefficient computation via KernelCoefficientFast.

    Shapes (inferred from the contractions below — TODO confirm against
    callers): z (L, 2) real view of a complex vector; d (Q, N); t (Q, H);
    b (Q, I, H, N); c (Q, C, H, N).  Returns (Q, I, C, L, H).
    """
    # z is forced to be fp32
    # the following prevents fp16 underflow, particularly on t
    fp16 = (t.dtype == torch.float16)
    if t.dtype == torch.float16:
        t = t.to(z.dtype)
        b = b.to(z.dtype)
        c = c.to(z.dtype)
    zz = contract("l,qh->qlh", torch.view_as_complex(z), 1 / t)  # (Q, L, H)
    bc = contract("qihn,qchn->icqhn", b, c).to(zz.dtype)  # (I, C, Q, H, N)
    I, C, Q, H, N = bc.shape
    # Flatten the (I, C) pair so the CUDA kernel sees a single batch axis
    bc = bc.view(-1, Q, H, N)
    L = zz.shape[1]
    d = d.to(zz.dtype)  # (Q, N)
    coeff = KernelCoefficientFast.apply(bc, zz, d)  # (IC, Q, L, H)
    # Restore the (I, C) axes and move Q to the front
    coeff = coeff.view(I, C, Q, L, H).permute(2, 0, 1, 3, 4)
    # Cast back if the inputs were fp16
    if fp16:
        coeff = coeff.to(torch.float16)
    return coeff
    # return coeff.view(I, C, Q, L, H).permute(2, 0, 1, 3, 4)  # (Q, I, C, L, H)
class KernelCoefficientFast(torch.autograd.Function):
    """Autograd wrapper around the fused CUDA kernel computing
    sum over n of a[n] / (b[l] - c[n])."""

    @staticmethod
    def _is_float_flag(b_l):
        # The CUDA binding selects its template instantiation via an int flag:
        # 1 -> single precision complex, 0 -> complex128 ("double").
        return 0 if b_l.dtype == torch.complex128 else 1

    @staticmethod
    def forward(ctx, a_n, b_l, c_n):
        # BUGFIX: the original guard `not a_n.is_cuda and b_l.is_cuda and
        # c_n.is_cuda` only fired when a_n alone was on CPU (precedence of
        # `not`); the intent — per the error message — is to reject the call
        # unless ALL operands are CUDA tensors.
        if not (a_n.is_cuda and b_l.is_cuda and c_n.is_cuda):
            raise NotImplementedError("Only support CUDA tensors")
        ctx.save_for_backward(a_n, b_l, c_n)
        return ssm_kernel_coefficient_cuda.forward(
            a_n, b_l, c_n, KernelCoefficientFast._is_float_flag(b_l)
        )

    @staticmethod
    def backward(ctx, dout):
        a_n, b_l, c_n = ctx.saved_tensors
        da_n, db_l, dc_n = ssm_kernel_coefficient_cuda.backward(
            a_n, b_l, c_n, dout, KernelCoefficientFast._is_float_flag(b_l)
        )
        return da_n, db_l, dc_n
if __name__ == "__main__":
# Test
num_heads = 4
input_dim = 64
hid_dim = 32
seq_len = 256
dtype=torch.float32
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.manual_seed(0)
b = torch.randn(num_heads, 2, input_dim, hid_dim, device=device, dtype=dtype).requires_grad_(True)
c = torch.randn(num_heads, 2, input_dim, hid_dim, device=device, dtype=dtype).requires_grad_(True)
z = torch.randn(seq_len, 2, device=device, dtype=dtype)
t = torch.randn(num_heads, input_dim, device=device, dtype=dtype).requires_grad_(True)
d = torch.randn(num_heads, hid_dim, device=device, dtype=dtype).requires_grad_(True)
zz = z.to(torch.float64)
dd = d.to(torch.float64)
tt = t.to(torch.float64)
bb = b.to(torch.float64)
cc = c.to(torch.float64)
ans64 = compute_slow(zz, dd, tt, bb, cc)
ans = compute_slow(z, d, t, b, c)
out64 = compute_fast(zz, dd, tt, bb, cc)
out = compute_fast(z, d, t, b, c)
err = torch.rand_like(out)
ans64_dd, ans64_dt, ans64_db, ans64_dc = torch.autograd.grad(
ans64, (dd, tt, bb, cc), err, retain_graph=True
)
ans_dd, ans_dt, ans_db, ans_dc = torch.autograd.grad(
ans, (d, t, b, c), err, retain_graph=True
)
out64_dd, out64_dt, out64_db, out64_dc = torch.autograd.grad(
out64, (dd, tt, bb, cc), err, retain_graph=True
)
out_dd, out_dt, out_db, out_dc = torch.autograd.grad(
out, (d, t, b, c), err, retain_graph=True
)
print()
print("out: max abs error (ans64, out64)", torch.max(torch.abs(out64 - ans64)))
print("dd: max abs error (ans64, out64)", torch.max(torch.abs(ans64_dd - out64_dd)))
print("dt: max abs error (ans64, out64)", torch.max(torch.abs(ans64_dt - out64_dt)))
print("db: max abs error (ans64, out64)", torch.max(torch.abs(ans64_db - out64_db)))
print("dc: max abs error (ans64, out64)", torch.max(torch.abs(ans64_dc - out64_dc)))
print()
print("out: max abs error (ans64, out)", torch.max(torch.abs(out - ans64)))
print("dd: max abs error (ans64, out)", torch.max(torch.abs(ans64_dd - out_dd)))
print("dt: max abs error (ans64, out)", torch.max(torch.abs(ans64_dt - out_dt)))
print("db: max abs error (ans64, out)", torch.max(torch.abs(ans64_db - out_db)))
print("dc: max abs error (ans64, out)", torch.max(torch.abs(ans64_dc - out_dc)))
print()
print("out: max abs error (ans, out64)", torch.max(torch.abs(out64 - ans)))
print("dd: max abs error (ans, out64)", torch.max(torch.abs(ans_dd - out64_dd)))
print("dt: max abs error (ans, out64)", torch.max(torch.abs(ans_dt - out64_dt)))
print("db: max abs error (ans, out64)", torch.max(torch.abs(ans_db - out64_db)))
print("dc: max abs error (ans, out64)", torch.max(torch.abs(ans_dc - out64_dc)))
print()
print("out: max abs error (ans, out)", torch.max(torch.abs(out - ans64)))
print("dd: max abs error (ans, out)", torch.max(torch.abs(ans_dd - out_dd)))
print("dt: max abs error (ans, out)", torch.max(torch.abs(ans_dt - out_dt)))
print("db: max abs error (ans, out)", torch.max(torch.abs(ans_db - out_db)))
print("dc: max abs error (ans, out)", torch.max(torch.abs(ans_dc - out_dc))) | 6,529 | 35.077348 | 102 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/mssm/ssm_kernel/setup.py | import torch
from torch.utils import cpp_extension
from setuptools import setup, find_packages
import subprocess
from pathlib import Path
import sys
import warnings
import os
from torch.utils.cpp_extension import CUDAExtension
from torch.utils.cpp_extension import BuildExtension
from torch.utils.cpp_extension import CUDA_HOME
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
    """Run ``<cuda_dir>/bin/nvcc -V`` and parse the toolkit release.

    Returns a tuple ``(raw_output, major, minor)`` where major/minor are
    strings, e.g. ("...", "11", "3") for CUDA 11.3.
    """
    nvcc = cuda_dir + "/bin/nvcc"
    raw_output = subprocess.check_output([nvcc, "-V"], universal_newlines=True)
    tokens = raw_output.split()
    # The version follows the word "release", formatted like "11.3,".
    release_token = tokens[tokens.index("release") + 1]
    bare_metal_major, remainder = release_token.split(".")[0], release_token.split(".")[1]
    bare_metal_minor = remainder[0]
    return raw_output, bare_metal_major, bare_metal_minor
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
    """Warn when the local CUDA toolkit version differs from the one PyTorch
    was compiled against, then return the local version.

    Returns ``(minor, major)`` as ints — note the (minor, major) order, which
    callers rely on.
    """
    raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
    torch_binary_major, torch_binary_minor = torch.version.cuda.split(".")[:2]
    print("\nCompiling cuda extensions with")
    print(raw_output + "from " + cuda_dir + "/bin\n")
    version_mismatch = (
        bare_metal_major != torch_binary_major
        or bare_metal_minor != torch_binary_minor
    )
    if version_mismatch:
        # A minor-version mismatch is usually survivable; just warn loudly.
        print("Cuda extensions are being compiled with a version of Cuda that does " +
              "not match the version used to compile Pytorch binaries.  " +
              "Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda) +
              "In some cases, a minor-version mismatch will not cause later errors:  " +
              "https://github.com/NVIDIA/apex/pull/323#discussion_r287021798.  "
              "You can try commenting out this check (at your own risk).")
    return int(bare_metal_minor), int(bare_metal_major)
# Pick the generator-header compile flag. Newer torch ships
# ATen/CUDAGeneratorImpl.h (breaking change in pytorch/pytorch#36026);
# when that header exists we must define OLD_GENERATOR_PATH.
generator_flag = []
torch_dir = torch.__path__[0]
_generator_header = os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")
if os.path.exists(_generator_header):
    generator_flag = ["-DOLD_GENERATOR_PATH"]
# NOTE(review): exact duplicate of get_cuda_bare_metal_version defined earlier
# in this file; this redefinition shadows it with identical behavior.
def get_cuda_bare_metal_version(cuda_dir):
    # Run `nvcc -V` and parse "release X.Y" into major/minor version strings.
    raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
    output = raw_output.split()
    release_idx = output.index("release") + 1
    release = output[release_idx].split(".")
    bare_metal_major = release[0]
    # release[1] looks like "3," — keep only the first digit of the minor.
    bare_metal_minor = release[1][0]
    return raw_output, bare_metal_major, bare_metal_minor
# Build configuration executed at import time: registers the (non-ninja)
# build_ext command, selects target GPU architectures, and derives
# torch-version-dependent macros.
cmdclass = {}
ext_modules = []
cmdclass['build_ext'] = BuildExtension.with_options(use_ninja=False)
cc_flag = []
_, bare_metal_major, _ = get_cuda_bare_metal_version(cpp_extension.CUDA_HOME)
# Compile for Turing (sm_75), Ampere (sm_80) and GA10x (sm_86).
cc_flag.append('-gencode')
cc_flag.append('arch=compute_75,code=sm_75')
cc_flag.append('-gencode')
cc_flag.append('arch=compute_80,code=sm_80')
cc_flag.append('-gencode')
cc_flag.append('arch=compute_86,code=sm_86')
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
# Version macros consumed by the C++/CUDA sources to guard API differences.
version_ge_1_1 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
    version_ge_1_1 = ['-DVERSION_GE_1_1']
version_ge_1_3 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
    version_ge_1_3 = ['-DVERSION_GE_1_3']
version_ge_1_5 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
    version_ge_1_5 = ['-DVERSION_GE_1_5']
version_dependent_macros = version_ge_1_1 + version_ge_1_3 + version_ge_1_5
# subprocess.run(["git", "submodule", "update", "--init", "cutlass"])
# subprocess.run(["git", "clone", "https://github.com/NVIDIA/cutlass.git", "multihead_attn/cutlass"])
# subprocess.run(["git", "-C", "cutlass", "checkout", "ed2ed4d667ce95e1371bd62db32b6a114e774336"])
# subprocess.run(["git", "-C", "cutlass", "checkout", "fe3438a3c1ccbdd03dc1aca3bb68099a9e2a58bd"])
# NOTE: check_cuda_torch_binary_vs_bare_metal returns (minor, major) — in that order.
bare_metal_minor, bare_metal_major = check_cuda_torch_binary_vs_bare_metal(CUDA_HOME)
print("GENERATOR FLAG:", generator_flag)
def append_nvcc_threads(nvcc_extra_args):
    """Append nvcc's multi-threaded compilation flag when supported.

    `--threads` was introduced in CUDA 11.2; for older toolkits the argument
    list is returned unchanged.
    """
    _, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
    # BUGFIX: compare (major, minor) as a tuple. The previous test
    # `major >= 11 and minor >= 2` wrongly rejected CUDA 12.0/12.1
    # (minor < 2) even though they are newer than 11.2.
    if (int(bare_metal_major), int(bare_metal_minor)) >= (11, 2):
        return nvcc_extra_args + ["--threads", "4"]
    return nvcc_extra_args
# Register the single CUDA extension built by this setup script. The .cpp
# file holds the pybind11 bindings; the .cu file the device kernels.
ext_modules.append(
    CUDAExtension(
        name="ssm_kernel_coefficient_cuda",
        sources=[
            "ssm_kernel_coefficient_binding.cpp",
            "ssm_kernel_coefficient_binding_cuda.cu"
        ],
        extra_compile_args={
            "cxx": ["-O3", "-std=c++17"] + generator_flag,
            "nvcc": append_nvcc_threads(
                [
                    "-O3",
                    "-std=c++17",
                    # re-enable half/bfloat16 operators that torch disables by default
                    "-U__CUDA_NO_HALF_OPERATORS__",
                    "-U__CUDA_NO_HALF_CONVERSIONS__",
                    "-U__CUDA_NO_HALF2_OPERATORS__",
                    "-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
                    "--expt-relaxed-constexpr",
                    "--expt-extended-lambda",
                    "--use_fast_math",
                    "--ptxas-options=-v",
                    "-lineinfo"
                ]
                + generator_flag
                + cc_flag
            ),
        },
        include_dirs=[
            Path(this_dir)
        ],
    )
)
# NOTE(review): the description still says "multi-head attention" although
# this package builds the SSM kernel bindings — likely copy-pasted; left
# unchanged to avoid altering package metadata.
setup(
    name='ssm_cuda_bindings',
    version='0.1',  # removed a stray line-continuation backslash here
    description='CUDA/C++ Pytorch extension for multi-head attention ported from NVIDIA apex',
    ext_modules=ext_modules,
    cmdclass=cmdclass,
)
| 5,691 | 34.798742 | 101 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/fairseq_wav2vec2/wavlm_modules.py | # --------------------------------------------------------
# WavLM: Large-Scale Self-Supervised Pre-training for Full Stack Speech Processing (https://arxiv.org/abs/2110.13900.pdf)
# Github source: https://github.com/microsoft/unilm/tree/master/wavlm
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq code bases
# https://github.com/pytorch/fairseq
# --------------------------------------------------------
import math
import warnings
from typing import Dict, Optional, Tuple
import torch
from torch import Tensor, nn
from torch.nn import Parameter
import torch.nn.functional as F
from onmt.modules.optimized.linear import Linear
from onmt.modules.optimized.self_attention_attnbias_func import self_attn_bias_func
class WavLMMultiheadAttention(nn.Module):
    """Multi-headed attention with WavLM's gated relative position bias.

    See "Attention Is All You Need" for the base mechanism and the WavLM
    paper (arXiv:2110.13900) for the bucketed relative-position bias and
    the optional GRU-style gating (`gru_rel_pos`).
    """

    def __init__(
            self,
            embed_dim,
            num_heads,
            kdim=None,
            vdim=None,
            dropout=0.0,
            bias=True,
            add_bias_kv=False,
            add_zero_attn=False,
            self_attention=False,
            encoder_decoder_attention=False,
            q_noise=0.0,
            qn_block_size=8,
            has_relative_attention_bias=False,
            num_buckets=32,
            max_distance=128,
            gru_rel_pos=False,
            rescale_init=False,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim

        self.num_heads = num_heads
        self.dropout_module = nn.Dropout(dropout)

        self.has_relative_attention_bias = has_relative_attention_bias
        self.num_buckets = num_buckets
        self.max_distance = max_distance
        if self.has_relative_attention_bias:
            # one learned bias value per (bucket, head)
            self.relative_attention_bias = nn.Embedding(num_buckets, num_heads)

        self.head_dim = embed_dim // num_heads
        self.q_head_dim = self.head_dim
        self.k_head_dim = self.head_dim
        assert (
                self.head_dim * num_heads == self.embed_dim
        ), "embed_dim must be divisible by num_heads"
        self.scaling = self.head_dim ** -0.5

        self.self_attention = self_attention
        self.encoder_decoder_attention = encoder_decoder_attention

        assert not self.self_attention or self.qkv_same_dim, (
            "Self-attention requires query, key and " "value to be of the same size"
        )

        k_bias = True
        if rescale_init:
            k_bias = False

        k_embed_dim = embed_dim
        q_embed_dim = embed_dim

        self.k_proj = Linear(self.kdim, k_embed_dim, bias=k_bias)
        self.v_proj = Linear(self.vdim, embed_dim, bias=bias)
        self.q_proj = Linear(embed_dim, q_embed_dim, bias=bias)

        self.out_proj = Linear(embed_dim, embed_dim, bias=bias)

        if add_bias_kv:
            self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
            self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None

        self.add_zero_attn = add_zero_attn

        self.gru_rel_pos = gru_rel_pos
        if self.gru_rel_pos:
            self.grep_linear = Linear(self.q_head_dim, 8)
            self.grep_a = nn.Parameter(torch.ones(1, num_heads, 1, 1))

        self.reset_parameters()
        self.fast_attention = False

    def reset_parameters(self):
        if self.qkv_same_dim:
            # Empirically observed the convergence to be much better with
            # the scaled initialization
            nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
            nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
            nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
        else:
            nn.init.xavier_uniform_(self.k_proj.weight)
            nn.init.xavier_uniform_(self.v_proj.weight)
            nn.init.xavier_uniform_(self.q_proj.weight)

        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.out_proj.bias is not None:
            nn.init.constant_(self.out_proj.bias, 0.0)
        if self.bias_k is not None:
            nn.init.xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            nn.init.xavier_normal_(self.bias_v)
        if self.has_relative_attention_bias:
            nn.init.xavier_normal_(self.relative_attention_bias.weight)

    def _relative_positions_bucket(self, relative_positions, bidirectional=True):
        """Map signed relative positions to bucket ids in [0, num_buckets).

        Close positions get exact buckets; distant positions share
        logarithmically-spaced buckets up to max_distance.
        """
        num_buckets = self.num_buckets
        max_distance = self.max_distance
        relative_buckets = 0

        if bidirectional:
            # half of the buckets encode the sign of the offset
            num_buckets = num_buckets // 2
            relative_buckets += (relative_positions > 0).to(torch.long) * num_buckets
            relative_positions = torch.abs(relative_positions)
        else:
            relative_positions = -torch.min(relative_positions, torch.zeros_like(relative_positions))

        max_exact = num_buckets // 2
        is_small = relative_positions < max_exact

        relative_postion_if_large = max_exact + (
                torch.log(relative_positions.float() / max_exact)
                / math.log(max_distance / max_exact)
                * (num_buckets - max_exact)
        ).to(torch.long)
        relative_postion_if_large = torch.min(
            relative_postion_if_large, torch.full_like(relative_postion_if_large, num_buckets - 1)
        )

        relative_buckets += torch.where(is_small, relative_positions, relative_postion_if_large)
        return relative_buckets

    def compute_bias(self, query_length, key_length):
        """Return the relative attention bias, shaped [heads, q_len, k_len]."""
        context_position = torch.arange(query_length, dtype=torch.long)[:, None]
        memory_position = torch.arange(key_length, dtype=torch.long)[None, :]
        relative_position = memory_position - context_position
        relative_position_bucket = self._relative_positions_bucket(
            relative_position,
            bidirectional=True
        )
        relative_position_bucket = relative_position_bucket.to(self.relative_attention_bias.weight.device)
        values = self.relative_attention_bias(relative_position_bucket)
        values = values.permute([2, 0, 1])
        return values

    def forward(
            self,
            query,
            key: Optional[Tensor],
            value: Optional[Tensor],
            key_padding_mask: Optional[Tensor] = None,
            incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
            need_weights: bool = False,
            static_kv: bool = False,
            attn_mask: Optional[Tensor] = None,
            before_softmax: bool = False,
            need_head_weights: bool = False,
            position_bias: Optional[Tensor] = None
    ) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]:
        """Input shape: Time x Batch x Channel
        Args:
            key_padding_mask (ByteTensor, optional): mask to exclude
                keys that are pads, of shape `(batch, src_len)`, where
                padding elements are indicated by 1s.
            need_weights (bool, optional): return the attention weights,
                averaged over heads (default: False).
            attn_mask (ByteTensor, optional): typically used to
                implement causal attention, where the mask prevents the
                attention from looking forward in time (default: None).
            before_softmax (bool, optional): return the raw attention
                weights and values before the attention softmax.
            need_head_weights (bool, optional): return the attention
                weights for each head. Implies *need_weights*. Default:
                return the average attention weights over all heads.
        """
        if need_head_weights:
            need_weights = True

        is_tpu = query.device.type == "xla"

        tgt_len, bsz, embed_dim = query.size()
        src_len = tgt_len
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]
        if key is not None:
            src_len, key_bsz, _ = key.size()
            if not torch.jit.is_scripting():
                assert key_bsz == bsz
                assert value is not None
                # BUGFIX: was `assert src_len, bsz == value.shape[:2]`, which
                # only asserted `src_len` and used the comparison as the
                # assert *message*. Compare the shape tuple properly.
                assert (src_len, bsz) == value.shape[:2]

        if self.has_relative_attention_bias and position_bias is None:
            position_bias = self.compute_bias(tgt_len, src_len)
            position_bias = position_bias.unsqueeze(0).repeat(bsz, 1, 1, 1).view(bsz * self.num_heads, tgt_len, src_len)

        if (
                not is_tpu  # don't use PyTorch version on TPUs
                and incremental_state is None
                and not static_kv
                # A workaround for quantization to work. Otherwise JIT compilation
                # treats bias in linear module as method.
                and not torch.jit.is_scripting()
                and self.q_head_dim == self.head_dim
        ):
            assert key is not None and value is not None
            assert attn_mask is None

            attn_mask_rel_pos = None
            if position_bias is not None:
                attn_mask_rel_pos = position_bias
                if self.gru_rel_pos:
                    # from [T x B x H] to [B x T x H]
                    query_layer = query.transpose(0, 1)
                    # [B x T x head x -1]
                    new_x_shape = query_layer.size()[:-1] + (self.num_heads, -1)
                    # [B x T x head x head_size]
                    query_layer = query_layer.view(*new_x_shape)
                    # [B x H x T x head_size]
                    query_layer = query_layer.permute(0, 2, 1, 3)
                    _B, _H, _L, __ = query_layer.size()

                    gate_input = self.grep_linear(query_layer).view(
                        _B, _H, _L, 2, 4).sum(-1, keepdim=False)

                    # inplace sigmoid
                    gate_a, gate_b = gate_input.sigmoid_().chunk(2, dim=-1)

                    gate_a_1 = gate_a * (gate_b * self.grep_a - 1.0) + 2.0
                    attn_mask_rel_pos = gate_a_1.view(bsz * self.num_heads, -1, 1) * position_bias

                attn_mask_rel_pos = attn_mask_rel_pos.view((-1, tgt_len, tgt_len))
            else:
                attn_mask_rel_pos = query.new_zeros(*(bsz * self.num_heads, tgt_len, tgt_len))

            # k_proj_bias = self.k_proj.bias
            # if k_proj_bias is None:
            #     k_proj_bias = torch.zeros_like(self.q_proj.bias)

            # if self.fast_attention:
            #     is_training = self.training
            #     low_precision = True
            #     in_proj_weight = self.proj_weight
            #     out_proj_weight = self.out_proj.weight
            #     recompute = False
            #     rotary = False
            #     positions = None
            #
            #     x, attn = self_attn_bias_func(False, is_training, self.num_heads, query, attn_mask_rel_pos,
            #                                   in_proj_weight, out_proj_weight,
            #                                   self.proj_bias, self.out_proj.bias,
            #                                   key_padding_mask, self.dropout_module.p,
            #                                   rotary, positions,
            #                                   False, None,  # incremental and state and double precision
            #                                   low_precision, True, recompute)  # learnable_pos + return-coverage
            # else:
            x, attn = F.multi_head_attention_forward(
                query,
                key,
                value,
                self.embed_dim,
                self.num_heads,
                torch.empty([0]),
                torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
                self.bias_k,
                self.bias_v,
                self.add_zero_attn,
                self.dropout_module.p,
                self.out_proj.weight,
                self.out_proj.bias,
                self.training,
                # self.training or self.dropout_module.apply_during_inference,
                key_padding_mask,
                need_weights,
                attn_mask_rel_pos,
                use_separate_proj_weight=True,
                q_proj_weight=self.q_proj.weight,
                k_proj_weight=self.k_proj.weight,
                v_proj_weight=self.v_proj.weight,
            )

            return x, attn, position_bias

        else:
            # this code path is never reached with wavlm
            raise NotImplementedError

    def convert_fast_attention(self):
        # Placeholder: the fused fast-attention conversion is disabled
        # (see the commented-out implementation below).
        pass
        # if self.fast_attention:
        #     return
        # self.fast_attention = True
        # assert self.qkv_same_dim, "Only works with QKV same dim."
        # w_q = self.q_proj.weight.clone()
        # w_k = self.k_proj.weight.clone()
        # w_v = self.v_proj.weight.clone()
        # weights = [w_q, w_k, w_v]
        # weight_ = torch.cat(weights, dim=0).contiguous()
        #
        # b_q = self.q_proj.bias.clone()
        # b_k = self.k_proj.bias.clone()
        # b_v = self.v_proj.bias.clone()
        # biases = [b_q, b_k, b_v]
        # bias_ = torch.cat(biases, dim=0).contiguous()
        #
        # head_dim = self.head_dim
        # heads = self.num_heads
        # input_dim = self.embed_dim
        #
        # # when we concatenate the weights, the output has the size 3 * D (3 -> heads -> head_dim)
        # # the fast attention module requires (heads -> 3 -> head_dim)
        # weight_ = weight_.reshape(3 * head_dim * heads, input_dim).view(3, heads, head_dim, input_dim).transpose(0, 1). \
        #     reshape(-1, input_dim)
        #
        # bias_ = bias_.reshape(3 * head_dim * heads).view(3, heads, head_dim).transpose(0, 1).reshape(-1)
        #
        # weight_t = torch.Tensor(3 * input_dim, input_dim)
        # bias_t = torch.Tensor(3 * input_dim)
        # weight_t.copy_(weight_)
        # bias_t.copy_(bias_)
        # self.proj_weight = Parameter(weight_t)
        # self.proj_bias = Parameter(bias_t)
        #
        # self.proj_weight.requires_grad = self.q_proj.weight.requires_grad
        # self.proj_bias.requires_grad = self.q_proj.bias.requires_grad
        # del self.q_proj, self.k_proj, self.v_proj
def get_activation_fn(activation: str):
    """Returns the activation function corresponding to `activation`"""
    if activation == "relu":
        return F.relu
    if activation == "gelu":
        return gelu
    if activation == "gelu_fast":
        # deprecated alias; warn once per call and fall back to gelu_accurate
        warnings.warn(
            "--activation-fn=gelu_fast has been renamed to gelu_accurate"
        )
        return gelu_accurate
    if activation == "gelu_accurate":
        return gelu_accurate
    if activation == "tanh":
        return torch.tanh
    if activation in ("linear", "glu"):
        # both resolve to the identity function here
        return lambda x: x
    raise RuntimeError("--activation-fn {} not supported".format(activation))
def gelu_accurate(x):
    """tanh-based GELU approximation (Hendrycks & Gimpel)."""
    # Cache sqrt(2/pi) on the function object so it is computed once.
    if not hasattr(gelu_accurate, "_a"):
        gelu_accurate._a = math.sqrt(2 / math.pi)
    a = gelu_accurate._a
    inner = a * (x + 0.044715 * torch.pow(x, 3))
    return 0.5 * x * (1 + torch.tanh(inner))
def gelu(x: torch.Tensor) -> torch.Tensor:
    """Exact GELU computed in fp32; the result is cast back to x's dtype."""
    result = torch.nn.functional.gelu(x.float())
    return result.type_as(x)
class GLU_Linear(nn.Module):
    """Gated linear unit: projects to 2*output_dim, then gates the first
    half with an activation of the second half ("bilinear" skips the
    activation and multiplies the halves directly)."""

    def __init__(self, input_dim, output_dim, glu_type="sigmoid", bias_in_glu=True):
        super(GLU_Linear, self).__init__()

        self.glu_type = glu_type
        self.output_dim = output_dim

        # Gate non-linearity; "bilinear" (or any unknown type) leaves
        # glu_act unset and forward() multiplies the halves directly.
        gate_activations = {
            "sigmoid": torch.nn.Sigmoid,
            "swish": torch.nn.SiLU,
            "relu": torch.nn.ReLU,
            "gelu": torch.nn.GELU,
        }
        if glu_type in gate_activations:
            self.glu_act = gate_activations[glu_type]()

        self.linear = nn.Linear(input_dim, output_dim * 2, bool(bias_in_glu))

    def forward(self, x):
        # to be consistent with GLU_Linear, we assume the input always has the #channel (#dim) in the last dimension of the tensor, so need to switch the dimension first for 1D-Conv case
        projected = self.linear(x)
        values = projected[:, :, 0:self.output_dim]
        gates = projected[:, :, self.output_dim:self.output_dim * 2]
        if self.glu_type == "bilinear":
            return values * gates
        return values * self.glu_act(gates)
def init_bert_params(module):
    """
    Initialize the weights specific to the BERT Model.
    This overrides the default initializations depending on the specified arguments.
        1. If normal_init_linear_weights is set then weights of linear
           layer will be initialized using the normal distribution and
           bais will be set to the specified value.
        2. If normal_init_embed_weights is set then weights of embedding
           layer will be initialized using the normal distribution.
        3. If normal_init_proj_weights is set then weights of
           in_project_weight for MultiHeadAttention initialized using
           the normal distribution (to be validated).
    """

    def _init_normal(data):
        # with FSDP, module params will be on CUDA, so we cast them back to CPU
        # so that the RNG is consistent with and without FSDP
        data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device))

    if isinstance(module, nn.Linear):
        _init_normal(module.weight.data)
        if module.bias is not None:
            module.bias.data.zero_()
    if isinstance(module, nn.Embedding):
        _init_normal(module.weight.data)
        if module.padding_idx is not None:
            # keep the padding embedding at exactly zero
            module.weight.data[module.padding_idx].zero_()
    if isinstance(module, WavLMMultiheadAttention):
        _init_normal(module.q_proj.weight.data)
        _init_normal(module.k_proj.weight.data)
        _init_normal(module.v_proj.weight.data)
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/fairseq_wav2vec2/enum.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum, EnumMeta
from typing import List
class StrEnumMeta(EnumMeta):
    """Metaclass that loosens isinstance checks for StrEnum classes.

    Workaround for submitit pickling leading to instance checks failing in
    hydra for StrEnum, see
    https://github.com/facebookresearch/hydra/issues/1156
    """

    @classmethod
    def __instancecheck__(cls, other):
        # Treat any object whose type repr mentions "enum" as an instance.
        type_repr = str(type(other))
        return "enum" in type_repr
class StrEnum(Enum, metaclass=StrEnumMeta):
def __str__(self):
return self.value
def __eq__(self, other: str):
return self.value == other
def __repr__(self):
return self.value
def __hash__(self):
return hash(str(self))
def ChoiceEnum(choices: List[str]):
    """return the Enum class used to enforce list of choices"""
    # each choice maps to itself, so members render as their own name
    members = {choice: choice for choice in choices}
    return StrEnum("Choices", members)
# Closed sets of CLI option values, each enforced via a ChoiceEnum.
LOG_FORMAT_CHOICES = ChoiceEnum(["json", "none", "simple", "tqdm"])
DDP_BACKEND_CHOICES = ChoiceEnum([
    "c10d",  # alias for pytorch_ddp
    "fully_sharded",  # FullyShardedDataParallel from fairscale
    "legacy_ddp",
    "no_c10d",  # alias for legacy_ddp
    "pytorch_ddp",
    "slow_mo",
])
DDP_COMM_HOOK_CHOICES = ChoiceEnum(["none", "fp16"])
DATASET_IMPL_CHOICES = ChoiceEnum(["raw", "lazy", "cached", "mmap", "fasta", "huffman"])
GENERATION_CONSTRAINTS_CHOICES = ChoiceEnum(["ordered", "unordered"])
GENERATION_DECODING_FORMAT_CHOICES = ChoiceEnum(
    ["unigram", "ensemble", "vote", "dp", "bs"]
)
ZERO_SHARDING_CHOICES = ChoiceEnum(["none", "os"])
PIPELINE_CHECKPOINT_CHOICES = ChoiceEnum(["always", "never", "except_last"])
PRINT_ALIGNMENT_CHOICES = ChoiceEnum(["hard", "soft"]) | 1,753 | 31.481481 | 107 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/fairseq_wav2vec2/adapter.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from onmt.modules.layer_norm import LayerNorm
class Adapter(torch.nn.Module):
def __init__(self, input_dim, downsample_factor=2):
self.input_dim = input_dim
self.middle_dim = input_dim // downsample_factor
super(Adapter, self).__init__()
self.linear_in = nn.Linear(input_dim, self.middle_dim)
self.linear_out = nn.Linear(self.middle_dim, input_dim)
self.norm = LayerNorm(input_dim)
self.fused = False
from onmt.modules.mlp.mlp import mlp_relu_function
if mlp_relu_function is not None:
self.fused_function = mlp_relu_function
self.fused = True
self.reset_parameters()
def reset_parameters(self):
def normal_(data):
# with FSDP, module params will be on CUDA, so we cast them back to CPU
# so that the RNG is consistent with and without FSDP
data.copy_(
data.cpu().normal_(mean=0.0, std=0.02).to(data.device)
)
with torch.no_grad():
normal_(self.linear_in.weight.data)
normal_(self.linear_out.weight.data)
self.linear_in.bias.data.zero_()
self.linear_out.bias.data.zero_()
def forward(self, input):
if self.fused:
weights = [self.linear_in.weight, self.linear_out.weight]
biases = [self.linear_in.bias, self.linear_out.bias]
# seq_len, bsz, hidden_size = input.size(0), input.size(1), input.size(2)
input_norm = self.norm(input)
input = self.fused_function(0.0, False, input_norm,
*weights, *biases)
return input
else:
return self.linear_out(F.relu(self.linear_in(self.norm(input))))
class MultilingualAdapter(torch.nn.Module):
    """One bottleneck Adapter per language, selected by a language id or
    blended with a per-sample soft mixture."""

    def __init__(self, n_languages, input_size, downsample_factor=4):
        self.n_languages = n_languages
        self.input_size = input_size
        super(MultilingualAdapter, self).__init__()
        self.adapters = nn.ModuleList([Adapter(input_size, downsample_factor) for _ in range(self.n_languages)])

    def forward(self, input, lang=None, mixture=None):
        """
        :param input: tensor TxBxH
        :param lang: tensor size 1 (language for the batch)
        :param mixture: tensor size B x n_language (mixture for the minibatch)
        :return:
        """
        if lang is not None:
            assert mixture is None
            if lang.numel() != 1:
                print("Expected singled unit tensor, but get", lang.size())
            assert lang.numel() == 1
            adapter = self.adapters[lang.item()]
            return adapter(input)

        if mixture is not None:
            assert mixture.size(0) == input.size(1) and mixture.size(1) == self.n_languages
            outputs = list()
            for i in range(self.n_languages):
                # mixture is [B x n_language]; reshape the per-language weight
                # to [1 x B x 1] so it broadcasts over time and hidden dims.
                # (BUGFIX: was `.unsqueeze(0).squeeze(-1)`, a no-op squeeze
                # that produced a non-broadcastable [1 x B] weight.)
                mixture_weight = mixture[:, i].unsqueeze(0).unsqueeze(-1)
                # BUGFIX: the weight must scale the adapter output *before*
                # appending; the old code multiplied the None returned by
                # list.append and also summed twice afterwards.
                outputs.append(self.adapters[i](input) * mixture_weight)

            outputs = torch.stack(outputs)  # n_languages x T x B x H
            outputs = torch.sum(outputs, 0, keepdim=False)  # -> T x B x H

            return outputs
| 3,345 | 34.595745 | 112 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/fairseq_wav2vec2/utils.py | try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
from omegaconf import DictConfig, OmegaConf, open_dict, _utils
import contextlib
import itertools
import logging
import re
import warnings
from typing import Optional, Tuple, Callable, Dict, List, TYPE_CHECKING
from omegaconf import DictConfig, OmegaConf, open_dict, _utils
from argparse import ArgumentError, ArgumentParser, Namespace
import numpy as np
import torch
def overwrite_args_by_name(cfg: DictConfig, overrides: Dict[str, any]):
    """Recursively apply flat `overrides` onto an omegaconf config in place.

    Nested DictConfig values recurse; Namespace values get attributes set
    directly; plain keys are overwritten.
    """
    # this will be deprecated when we get rid of argparse and model_overrides logic
    # NOTE(review): REGISTRIES is a local empty dict here (upstream fairseq
    # populates it from its registry module), so the dataclass-registry
    # branch below is effectively dead in this copy.
    REGISTRIES = {}
    # open_dict temporarily disables struct mode so new keys may be written.
    with open_dict(cfg):
        for k in cfg.keys():
            # "k in cfg" will return false if its a "mandatory value (e.g. ???)"
            if k in cfg and isinstance(cfg[k], DictConfig):
                if k in overrides and isinstance(overrides[k], dict):
                    for ok, ov in overrides[k].items():
                        if isinstance(ov, dict) and cfg[k][ok] is not None:
                            overwrite_args_by_name(cfg[k][ok], ov)
                        else:
                            cfg[k][ok] = ov
                else:
                    overwrite_args_by_name(cfg[k], overrides)
            elif k in cfg and isinstance(cfg[k], Namespace):
                for override_key, val in overrides.items():
                    setattr(cfg[k], override_key, val)
            elif k in overrides:
                if (
                        k in REGISTRIES
                        and overrides[k] in REGISTRIES[k]["dataclass_registry"]
                ):
                    # for interactive prediction this is needed, for training this should not be needed
                    cfg[k] = DictConfig(
                        REGISTRIES[k]["dataclass_registry"][overrides[k]]
                    )
                    overwrite_args_by_name(cfg[k], overrides)
                    cfg[k]._name = overrides[k]
                else:
                    cfg[k] = overrides[k]
class omegaconf_no_object_check:
    """Context manager that temporarily disables omegaconf's
    primitive-type check, allowing arbitrary objects (e.g. Namespace)
    inside a DictConfig."""

    def __init__(self):
        # remember the real check so it can be restored on exit
        self.old_is_primitive = _utils.is_primitive_type

    def __enter__(self):
        def _accept_anything(_):
            return True
        _utils.is_primitive_type = _accept_anything

    def __exit__(self, type, value, traceback):
        _utils.is_primitive_type = self.old_is_primitive
# NOTE(review): this function was copied from fairseq without its imports —
# `os`, `override_module_args`, `GlobalHydra`, `initialize`, `compose`,
# `logger` and `_set_legacy_defaults` are all undefined in this file, so
# calling it will raise NameError. Verify whether it is actually used.
def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig:
    """Convert a flat argparse.Namespace to a structured DictConfig."""
    # Here we are using field values provided in args to override counterparts inside config object
    overrides, deletes = override_module_args(args)
    # configs will be in fairseq/config after installation
    config_path = os.path.join("..", "config")
    GlobalHydra.instance().clear()
    with initialize(config_path=config_path):
        try:
            composed_cfg = compose("config", overrides=overrides, strict=False)
        except:
            logger.error("Error when composing. Overrides: " + str(overrides))
            raise
        for k in deletes:
            composed_cfg[k] = None
    cfg = OmegaConf.create(
        OmegaConf.to_container(composed_cfg, resolve=True, enum_to_str=True)
    )
    # hack to be able to set Namespace in dict config. this should be removed when we update to newer
    # omegaconf version that supports object flags, or when we migrate all existing models
    from omegaconf import _utils
    with omegaconf_no_object_check():
        # Fill each top-level section from the legacy Namespace when hydra
        # did not already compose it, pulling defaults from the matching
        # fairseq registry.
        if cfg.task is None and getattr(args, "task", None):
            cfg.task = Namespace(**vars(args))
            from fairseq.tasks import TASK_REGISTRY
            _set_legacy_defaults(cfg.task, TASK_REGISTRY[args.task])
            cfg.task._name = args.task
        if cfg.model is None and getattr(args, "arch", None):
            cfg.model = Namespace(**vars(args))
            from fairseq.models import ARCH_MODEL_REGISTRY
            _set_legacy_defaults(cfg.model, ARCH_MODEL_REGISTRY[args.arch])
            cfg.model._name = args.arch
        if cfg.optimizer is None and getattr(args, "optimizer", None):
            cfg.optimizer = Namespace(**vars(args))
            from fairseq.optim import OPTIMIZER_REGISTRY
            _set_legacy_defaults(cfg.optimizer, OPTIMIZER_REGISTRY[args.optimizer])
            cfg.optimizer._name = args.optimizer
        if cfg.lr_scheduler is None and getattr(args, "lr_scheduler", None):
            cfg.lr_scheduler = Namespace(**vars(args))
            from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY
            _set_legacy_defaults(
                cfg.lr_scheduler, LR_SCHEDULER_REGISTRY[args.lr_scheduler]
            )
            cfg.lr_scheduler._name = args.lr_scheduler
        if cfg.criterion is None and getattr(args, "criterion", None):
            cfg.criterion = Namespace(**vars(args))
            from fairseq.criterions import CRITERION_REGISTRY
            _set_legacy_defaults(cfg.criterion, CRITERION_REGISTRY[args.criterion])
            cfg.criterion._name = args.criterion
    # Re-enable struct mode so unknown keys raise instead of silently adding.
    OmegaConf.set_struct(cfg, True)
    return cfg
def compute_mask_indices(
        shape: Tuple[int, int],
        padding_mask: Optional[torch.Tensor],
        mask_prob: float,
        mask_length: int,
        mask_type: str = "static",
        mask_other: float = 0.0,
        min_masks: int = 0,
        no_overlap: bool = False,
        min_space: int = 0,
) -> np.ndarray:
    """
    Computes random mask spans for a given shape
    Args:
        shape: the shape for which to compute masks.
            should be of size 2 where first element is batch size and 2nd is timesteps
        padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
        mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by
            number of timesteps divided by length of mask span to mask approximately this percentage of all elements.
            however due to overlaps, the actual number will be smaller (unless no_overlap is True)
        mask_type: how to compute mask lengths
            static = fixed size
            uniform = sample from uniform distribution [mask_other, mask_length*2]
            normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element
            poisson = sample from possion distribution with lambda = mask length
        min_masks: minimum number of masked spans
        no_overlap: if false, will switch to an alternative recursive algorithm that prevents spans from overlapping
        min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans
    """

    bsz, all_sz = shape
    mask = np.full((bsz, all_sz), False)

    all_num_mask = int(
        # add a random number for probabilistic rounding
        mask_prob * all_sz / float(mask_length)
        + np.random.rand()
    )

    all_num_mask = max(min_masks, all_num_mask)

    mask_idcs = []
    for i in range(bsz):
        if padding_mask is not None:
            # only mask within the unpadded prefix of this row
            sz = all_sz - padding_mask[i].long().sum().item()
            num_mask = int(
                # add a random number for probabilistic rounding
                mask_prob * sz / float(mask_length)
                + np.random.rand()
            )
            num_mask = max(min_masks, num_mask)
        else:
            sz = all_sz
            num_mask = all_num_mask

        if mask_type == "static":
            lengths = np.full(num_mask, mask_length)
        elif mask_type == "uniform":
            lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask)
        elif mask_type == "normal":
            lengths = np.random.normal(mask_length, mask_other, size=num_mask)
            lengths = [max(1, int(round(x))) for x in lengths]
        elif mask_type == "poisson":
            lengths = np.random.poisson(mask_length, size=num_mask)
            lengths = [int(round(x)) for x in lengths]
        else:
            raise Exception("unknown mask selection " + mask_type)

        if sum(lengths) == 0:
            # guarantee at least one maskable span
            lengths[0] = min(mask_length, sz - 1)

        if no_overlap:
            mask_idc = []

            def arrange(s, e, length, keep_length):
                # place a span inside [s, e) and return the leftover parts
                # that can still host further spans (respecting min_space)
                span_start = np.random.randint(s, e - length)
                mask_idc.extend(span_start + i for i in range(length))

                new_parts = []
                if span_start - s - min_space >= keep_length:
                    new_parts.append((s, span_start - min_space + 1))
                if e - span_start - keep_length - min_space > keep_length:
                    new_parts.append((span_start + length + min_space, e))
                return new_parts

            parts = [(0, sz)]
            min_length = min(lengths)
            for length in sorted(lengths, reverse=True):
                # BUGFIX: `np.int` was removed in NumPy 1.24; use builtin int.
                lens = np.fromiter(
                    (e - s if e - s >= length + min_space else 0 for s, e in parts),
                    int,
                )
                l_sum = np.sum(lens)
                if l_sum == 0:
                    break
                probs = lens / np.sum(lens)
                c = np.random.choice(len(parts), p=probs)
                s, e = parts.pop(c)
                parts.extend(arrange(s, e, length, min_length))
            mask_idc = np.asarray(mask_idc)
        else:
            min_len = min(lengths)
            if sz - min_len <= num_mask:
                min_len = sz - num_mask - 1

            mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)

            mask_idc = np.asarray(
                [
                    mask_idc[j] + offset
                    for j in range(len(mask_idc))
                    for offset in range(lengths[j])
                ]
            )

        mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))

    # trim every row to the same number of masked steps
    min_len = min([len(m) for m in mask_idcs])
    for i, mask_idc in enumerate(mask_idcs):
        if len(mask_idc) > min_len:
            mask_idc = np.random.choice(mask_idc, min_len, replace=False)
        mask[i, mask_idc] = True

    return mask
def buffered_arange(max):
    """Return torch.arange(max) as a view into a cached, growing buffer
    (avoids reallocating on every call)."""
    if not hasattr(buffered_arange, "buf"):
        buffered_arange.buf = torch.LongTensor()
    buf = buffered_arange.buf
    if max > buf.numel():
        # grow the cache in place and refill it
        buf.resize_(max)
        torch.arange(max, out=buf)
    return buf[:max]
def is_xla_tensor(tensor):
    """Return True iff *tensor* is a torch tensor living on an XLA (TPU) device."""
    return torch.is_tensor(tensor) and tensor.device.type == "xla"


def index_put(tensor, indices, value):
    """Perform ``tensor[indices] = value`` and return the tensor.

    On XLA devices the boolean-mask assignment is replaced by an arithmetic
    blend (multiply-and-add), which avoids dynamic-shape recompilation.
    """
    if not is_xla_tensor(tensor):
        tensor[indices] = value
        return tensor
    # xla path: broadcast the boolean mask up to tensor's rank, then blend.
    while indices.dim() < tensor.dim():
        indices = indices.unsqueeze(-1)
    if indices.size(-1) < tensor.size(-1):
        indices = indices.expand_as(tensor)
    return torch.mul(tensor, ~indices) + torch.mul(value, indices)
def get_activation_fn(activation: str) -> Callable:
    """Returns the activation function corresponding to `activation`.

    Raises RuntimeError for an unknown name.
    """
    from .fairseq_modules import gelu, gelu_accurate

    # Dispatch table; "gelu_fast" is a deprecated alias of gelu_accurate.
    table = {
        "relu": F.relu,
        "gelu": gelu,
        "gelu_fast": gelu_accurate,
        "gelu_accurate": gelu_accurate,
        "tanh": torch.tanh,
        "linear": lambda x: x,
    }
    if activation not in table:
        raise RuntimeError("--activation-fn {} not supported".format(activation))
    return table[activation]
def get_available_activation_fns() -> List:
    """Return the names accepted by :func:`get_activation_fn`."""
    # "gelu_fast" is kept only for backward compatibility (deprecated).
    return list(("relu", "gelu", "gelu_fast", "gelu_accurate", "tanh", "linear"))
| 11,673 | 36.536977 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/fairseq_wav2vec2/wav2vec2.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
from typing import List, Tuple
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import compute_mask_indices, get_activation_fn, get_available_activation_fns
from .enum import ChoiceEnum
from torch.cuda.amp import autocast
from .fairseq_modules import (
Fp32GroupNorm,
Fp32LayerNorm,
GradMultiply,
GumbelVectorQuantizer,
MultiheadAttention,
SamePad,
TransposeLast,
index_copy
)
from onmt.modules.layer_norm import LayerNorm
from onmt.modules.optimized.dropout_add import fused_dropout_add
from onmt.modules.optimized.linear import factorize_linear
from .utils import buffered_arange, index_put, is_xla_tensor
# from fairseq.dataclass import FairseqDataclass
# from fairseq.models.wav2vec import Wav2Vec2Config
from .dataclass import Wav2Vec2Config
from onmt.modules.sinusoidal_positional_encoding import SinusoidalPositionalEmbedding
EXTRACTOR_MODE_CHOICES = ChoiceEnum(["default", "layer_norm"])
MASKING_DISTRIBUTION_CHOICES = ChoiceEnum(["static", "uniform", "normal", "poisson"])
def dropout_residual_connection(x, residual, dropout_module, is_training):
    """Apply dropout to ``x`` and add ``residual``.

    Uses the fused dropout+add CUDA kernel when it is available and dropout
    is actually active; otherwise falls back to the plain PyTorch ops.
    """
    use_fused = (
        fused_dropout_add is not None
        and dropout_module.p > 0
        and is_training
    )
    if use_fused:
        return fused_dropout_add(x, residual, dropout_module.p, is_training)
    return dropout_module(x) + residual
def init_bert_params(module):
    """BERT-style weight initialization, meant to be used via ``nn.Module.apply``.

    Overrides the default initializers:

    1. ``nn.Linear``: weight ~ N(0, 0.02), bias zeroed.
    2. ``nn.Embedding``: weight ~ N(0, 0.02), padding row zeroed.
    3. ``MultiheadAttention``: the q/k/v projections (or the fused projection
       weight when ``fast_attention`` is enabled) get the same normal init.
    """

    def normal_(data):
        # with FSDP, module params will be on CUDA, so we cast them back to CPU
        # so that the RNG is consistent with and without FSDP
        data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device))

    if isinstance(module, nn.Linear):
        normal_(module.weight.data)
        if module.bias is not None:
            module.bias.data.zero_()

    if isinstance(module, nn.Embedding):
        normal_(module.weight.data)
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()

    if isinstance(module, MultiheadAttention):
        if module.fast_attention:
            normal_(module.proj_weight.data)
        else:
            normal_(module.q_proj.weight.data)
            normal_(module.k_proj.weight.data)
            normal_(module.v_proj.weight.data)
#
# @dataclass
# class Wav2Vec2Config(FairseqDataclass):
# extractor_mode: EXTRACTOR_MODE_CHOICES = field(
# default="default",
# metadata={
# "help": "mode for feature extractor. default has a single group norm with d "
# "groups in the first conv block, whereas layer_norm has layer norms in "
# "every block (meant to use with normalize=True)"
# },
# )
# encoder_layers: int = field(
# default=12, metadata={"help": "num encoder layers in the transformer"}
# )
# encoder_embed_dim: int = field(
# default=768, metadata={"help": "encoder embedding dimension"}
# )
# encoder_ffn_embed_dim: int = field(
# default=3072, metadata={"help": "encoder embedding dimension for FFN"}
# )
# encoder_attention_heads: int = field(
# default=12, metadata={"help": "num encoder attention heads"}
# )
# activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
# default="gelu", metadata={"help": "activation function to use"}
# )
#
# # dropouts
# dropout: float = field(
# default=0.1, metadata={"help": "dropout probability for the transformer"}
# )
# attention_dropout: float = field(
# default=0.1, metadata={"help": "dropout probability for attention weights"}
# )
# activation_dropout: float = field(
# default=0.0, metadata={"help": "dropout probability after activation in FFN"}
# )
# encoder_layerdrop: float = field(
# default=0.0, metadata={"help": "probability of dropping a transformer layer"}
# )
# dropout_input: float = field(
# default=0.0,
# metadata={"help": "dropout to apply to the input (after feat extr)"},
# )
# dropout_features: float = field(
# default=0.0,
# metadata={"help": "dropout to apply to the features (after feat extr)"},
# )
#
# final_dim: int = field(
# default=0,
# metadata={
# "help": "project final representations and targets to this many dimensions."
# "set to encoder_embed_dim is <= 0"
# },
# )
# layer_norm_first: bool = field(
# default=False, metadata={"help": "apply layernorm first in the transformer"}
# )
# conv_feature_layers: str = field(
# default="[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]",
# metadata={
# "help": "string describing convolutional feature extraction layers in form of a python list that contains "
# "[(dim, kernel_size, stride), ...]"
# },
# )
# conv_bias: bool = field(
# default=False, metadata={"help": "include bias in conv encoder"}
# )
# logit_temp: float = field(
# default=0.1, metadata={"help": "temperature to divide logits by"}
# )
# quantize_targets: bool = field(
# default=False, metadata={"help": "use quantized targets"}
# )
# quantize_input: bool = field(
# default=False, metadata={"help": "use quantized inputs"}
# )
# same_quantizer: bool = field(
# default=False, metadata={"help": "use same quantizer for inputs and targets"}
# )
# target_glu: bool = field(
# default=False, metadata={"help": "adds projection + glu to targets"}
# )
# feature_grad_mult: float = field(
# default=1.0, metadata={"help": "multiply feature extractor var grads by this"}
# )
# quantizer_depth: int = field(
# default=1,
# metadata={"help": "number of quantizer layers"},
# )
# quantizer_factor: int = field(
# default=3,
# metadata={
# "help": "dimensionality increase for inner quantizer layers (if depth > 1)"
# },
# )
# latent_vars: int = field(
# default=320,
# metadata={"help": "number of latent variables V in each group of the codebook"},
# )
# latent_groups: int = field(
# default=2,
# metadata={"help": "number of groups G of latent variables in the codebook"},
# )
# latent_dim: int = field(
# default=0,
# metadata={
# "help": "if > 0, uses this dimensionality for latent variables. "
# "otherwise uses final_dim / latent_groups"
# },
# )
#
# # masking
# mask_length: int = field(default=10, metadata={"help": "mask length"})
# mask_prob: float = field(
# default=0.65, metadata={"help": "probability of replacing a token with mask"}
# )
# mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
# default="static", metadata={"help": "how to choose mask length"}
# )
# mask_other: float = field(
# default=0,
# metadata={
# "help": "secondary mask argument (used for more complex distributions), "
# "see help in compute_mask_indices"
# },
# )
# no_mask_overlap: bool = field(
# default=False, metadata={"help": "whether to allow masks to overlap"}
# )
# mask_min_space: int = field(
# default=1,
# metadata={"help": "min space between spans (if no overlap is enabled)"},
# )
#
# # channel masking
# mask_channel_length: int = field(
# default=10, metadata={"help": "length of the mask for features (channels)"}
# )
# mask_channel_prob: float = field(
# default=0.0, metadata={"help": "probability of replacing a feature with 0"}
# )
# mask_channel_before: bool = False
# mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
# default="static",
# metadata={"help": "how to choose mask length for channel masking"},
# )
# mask_channel_other: float = field(
# default=0,
# metadata={
# "help": "secondary mask argument (used for more complex distributions), "
# "see help in compute_mask_indices"
# },
# )
# no_mask_channel_overlap: bool = field(
# default=False, metadata={"help": "whether to allow channel masks to overlap"}
# )
# mask_channel_min_space: int = field(
# default=1,
# metadata={"help": "min space between spans (if no overlap is enabled)"},
# )
#
# # negative selection
# num_negatives: int = field(
# default=100,
# metadata={"help": "number of negative examples from the same sample"},
# )
# negatives_from_everywhere: bool = field(
# default=False,
# metadata={"help": "sample negatives from everywhere, not just masked states"},
# )
# cross_sample_negatives: int = field(
# default=0, metadata={"help": "number of negative examples from the any sample"}
# )
# codebook_negatives: int = field(
# default=0, metadata={"help": "number of negative examples codebook"}
# )
#
# # positional embeddings
# conv_pos: int = field(
# default=128,
# metadata={"help": "number of filters for convolutional positional embeddings"},
# )
# conv_pos_groups: int = field(
# default=16,
# metadata={"help": "number of groups for convolutional positional embedding"},
# )
#
# latent_temp: Tuple[float, float, float] = field(
# default=(2, 0.5, 0.999995),
# metadata={
# "help": "temperature for latent variable sampling. "
# "can be tuple of 3 values (start, end, decay)"
# },
# )
# @register_model("wav2vec2", dataclass=Wav2Vec2Config)
class Wav2Vec2Model(torch.nn.Module):
    """wav2vec 2.0 model: a convolutional feature extractor followed by a
    Transformer context network, with an optional Gumbel vector quantizer
    that provides targets/negatives for the contrastive pre-training loss.

    NMTGMinor additions over upstream fairseq: Performer/FAVOR attention
    (``favor``), weight drop, optional language prediction, and helpers for
    precomputed TDNN features and quantizer-only inference.
    """

    def __init__(self, cfg: Wav2Vec2Config,
                 favor=False, feature_redraw_interval=1000, auto_check_redraw=True,
                 weight_drop=0.0, predict_language=False, n_languages=1):
        super().__init__()
        self.rotary_attention = False
        self.relative_attention = False
        self.cfg = cfg
        # NOTE(review): eval() on a config string — safe only for trusted configs.
        feature_enc_layers = eval(cfg.conv_feature_layers)
        # embedding dim of the conv extractor = channel count of its last layer
        self.embed = feature_enc_layers[-1][0]
        self.feature_extractor = ConvFeatureExtractionModel(
            conv_layers=feature_enc_layers,
            dropout=0.0,
            mode=cfg.extractor_mode,
            conv_bias=cfg.conv_bias,
        )
        # projects conv features to the encoder dim when they differ
        self.post_extract_proj = (
            nn.Linear(self.embed, cfg.encoder_embed_dim)
            if self.embed != cfg.encoder_embed_dim and not cfg.quantize_input
            else None
        )
        # time masking (SpecAugment-style) hyper-parameters
        self.mask_prob = cfg.mask_prob
        self.mask_selection = cfg.mask_selection
        self.mask_other = cfg.mask_other
        self.mask_length = cfg.mask_length
        self.no_mask_overlap = cfg.no_mask_overlap
        self.mask_min_space = cfg.mask_min_space
        # channel (feature-dim) masking hyper-parameters
        self.mask_channel_prob = cfg.mask_channel_prob
        # older configs may lack this flag; defaults to True here
        self.mask_channel_before = cfg.mask_channel_before if hasattr(cfg, 'mask_channel_before') else True
        self.mask_channel_selection = cfg.mask_channel_selection
        self.mask_channel_other = cfg.mask_channel_other
        self.mask_channel_length = cfg.mask_channel_length
        self.no_mask_channel_overlap = cfg.no_mask_channel_overlap
        self.mask_channel_min_space = cfg.mask_channel_min_space
        self.dropout_input = nn.Dropout(cfg.dropout_input)
        self.dropout_features = nn.Dropout(cfg.dropout_features)
        self.feature_grad_mult = cfg.feature_grad_mult
        self.quantizer = None
        self.input_quantizer = None
        # contrastive-loss negative sampling settings
        self.n_negatives = cfg.num_negatives
        self.cross_sample_negatives = cfg.cross_sample_negatives
        self.codebook_negatives = cfg.codebook_negatives
        self.negatives_from_everywhere = cfg.negatives_from_everywhere
        self.logit_temp = cfg.logit_temp
        final_dim = cfg.final_dim if cfg.final_dim > 0 else cfg.encoder_embed_dim
        if cfg.quantize_targets:
            vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else final_dim
            self.quantizer = GumbelVectorQuantizer(
                dim=self.embed,
                num_vars=cfg.latent_vars,
                temp=cfg.latent_temp,
                groups=cfg.latent_groups,
                combine_groups=False,
                vq_dim=vq_dim,
                time_first=True,
                weight_proj_depth=cfg.quantizer_depth if hasattr(cfg, 'quantizer_depth') else 1,
                weight_proj_factor=cfg.quantizer_factor if hasattr(cfg, 'quantizer_factor') else 3,
            )
            self.project_q = nn.Linear(vq_dim, final_dim)
        else:
            self.project_q = nn.Linear(self.embed, final_dim)
        if cfg.quantize_input:
            if cfg.same_quantizer and self.quantizer is not None:
                vq_dim = final_dim
                self.input_quantizer = self.quantizer
            else:
                vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else cfg.encoder_embed_dim
                self.input_quantizer = GumbelVectorQuantizer(
                    dim=self.embed,
                    num_vars=cfg.latent_vars,
                    temp=cfg.latent_temp,
                    groups=cfg.latent_groups,
                    combine_groups=False,
                    vq_dim=vq_dim,
                    time_first=True,
                    weight_proj_depth=cfg.quantizer_depth,
                    weight_proj_factor=cfg.quantizer_factor,
                )
            self.project_inp = nn.Linear(vq_dim, cfg.encoder_embed_dim)
        # learned embedding substituted at masked time steps
        self.mask_emb = nn.Parameter(
            torch.FloatTensor(cfg.encoder_embed_dim).uniform_()
        )
        self.encoder = TransformerEncoder(cfg, favor=favor, weight_drop=weight_drop,
                                          predict_language=predict_language, n_languages=n_languages)
        self.layer_norm = LayerNorm(self.embed)
        self.target_glu = None
        if cfg.target_glu:
            self.target_glu = nn.Sequential(
                nn.Linear(final_dim, final_dim * 2), nn.GLU()
            )
        self.final_proj = nn.Linear(cfg.encoder_embed_dim, final_dim)
        self.favor = favor

    def replace_attn_with_s4(self, cfg):
        """Replace the encoder's self-attention modules with S4 layers."""
        self.encoder.replace_attn_with_s4(cfg)

    def upgrade_state_dict_named(self, state_dict, name):
        # NOTE(review): nn.Module defines no upgrade_state_dict_named, so this
        # super() call will raise unless a mixin provides it — confirm base class.
        super().upgrade_state_dict_named(state_dict, name)
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        return state_dict

    def clean_unused_weights(self):
        """Drop all pre-training-only modules (quantizers, projection heads)."""
        self.input_quantizer = None
        self.quantizer = None
        self.target_glu = None
        self.final_proj = None
        return

    @classmethod
    def build_model(cls, cfg: Wav2Vec2Config, task=None):
        """Build a new model instance."""
        return cls(cfg)

    def apply_mask(
        self,
        x,
        padding_mask,
        mask_indices=None,
        mask_channel_indices=None,
    ):
        """Apply time and/or channel masking to features ``x`` (B x T x C).

        Returns the masked features and the boolean time-mask tensor
        (or None when time masking is disabled).
        """
        B, T, C = x.shape
        # optional channel masking applied BEFORE time masking
        if self.mask_channel_prob > 0 and self.mask_channel_before:
            mask_channel_indices = compute_mask_indices(
                (B, C),
                None,
                self.mask_channel_prob,
                self.mask_channel_length,
                self.mask_channel_selection,
                self.mask_channel_other,
                no_overlap=self.no_mask_channel_overlap,
                min_space=self.mask_channel_min_space,
            )
            mask_channel_indices = (
                torch.from_numpy(mask_channel_indices)
                .to(x.device)
                .unsqueeze(1)
                .expand(-1, T, -1)
            )
            x[mask_channel_indices] = 0
        if self.mask_prob > 0:
            if mask_indices is None:
                mask_indices = compute_mask_indices(
                    (B, T),
                    padding_mask,
                    self.mask_prob,
                    self.mask_length,
                    self.mask_selection,
                    self.mask_other,
                    min_masks=2,
                    no_overlap=self.no_mask_overlap,
                    min_space=self.mask_min_space,
                )
                mask_indices = torch.from_numpy(mask_indices).to(x.device)
            # masked time steps are replaced with the learned mask embedding
            x = index_put(x, mask_indices, self.mask_emb.type_as(x))
        else:
            mask_indices = None
        # optional channel masking applied AFTER time masking
        if self.mask_channel_prob > 0 and not self.mask_channel_before:
            if mask_channel_indices is None:
                mask_channel_indices = compute_mask_indices(
                    (B, C),
                    None,
                    self.mask_channel_prob,
                    self.mask_channel_length,
                    self.mask_channel_selection,
                    self.mask_channel_other,
                    no_overlap=self.no_mask_channel_overlap,
                    min_space=self.mask_channel_min_space,
                )
                mask_channel_indices = (
                    torch.from_numpy(mask_channel_indices)
                    .to(x.device)
                    .unsqueeze(1)
                    .expand(-1, T, -1)
                )
            x = index_put(x, mask_channel_indices, 0)
        return x, mask_indices

    def sample_negatives(self, y, num, padding_count=None):
        """Sample negative examples for the contrastive loss.

        ``y`` is B x T x C; returns (negatives as N x B x T x C, indices).
        Negatives are drawn from the same utterance and/or across the batch,
        avoiding the positive's own time index.
        """
        if self.n_negatives == 0 and self.cross_sample_negatives == 0:
            return y.new(0)
        bsz, tsz, fsz = y.shape
        y = y.view(-1, fsz)  # BTC => (BxT)C
        # FIXME: what happens if padding_count is specified?
        cross_high = tsz * bsz
        high = tsz - (padding_count or 0)
        with torch.no_grad():
            assert high > 1, f"{bsz, tsz, fsz}"
            if self.n_negatives > 0:
                tszs = (
                    buffered_arange(num)
                    .unsqueeze(-1)
                    .expand(-1, self.n_negatives)
                    .flatten()
                )
                # sample in [0, high-2] then bump indices >= the positive's
                # position by 1 so the positive itself is never drawn
                neg_idxs = torch.randint(
                    low=0, high=high - 1, size=(bsz, self.n_negatives * num)
                )
                neg_idxs[neg_idxs >= tszs] += 1
            if self.cross_sample_negatives > 0:
                tszs = (
                    buffered_arange(num)
                    .unsqueeze(-1)
                    .expand(-1, self.cross_sample_negatives)
                    .flatten()
                )
                cross_neg_idxs = torch.randint(
                    low=0,
                    high=cross_high - 1,
                    size=(bsz, self.cross_sample_negatives * num),
                )
                cross_neg_idxs[cross_neg_idxs >= tszs] += 1
        if self.n_negatives > 0:
            # shift per-utterance indices into the flattened (BxT) space
            for i in range(1, bsz):
                neg_idxs[i] += i * high
        else:
            neg_idxs = cross_neg_idxs
        if self.cross_sample_negatives > 0 and self.n_negatives > 0:
            neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1)
        negs = y[neg_idxs.view(-1)]
        negs = negs.view(
            bsz, num, self.n_negatives + self.cross_sample_negatives, fsz
        ).permute(
            2, 0, 1, 3
        )  # to NxBxTxC
        return negs, neg_idxs

    def compute_preds(self, x, y, negatives):
        """Cosine-similarity logits of predictions ``x`` against the positive
        ``y`` (stacked first) and the negatives; collisions where a negative
        equals the positive are masked out with -inf (or a large negative on xla).
        """
        neg_is_pos = (y == negatives).all(-1)
        y = y.unsqueeze(0)
        targets = torch.cat([y, negatives], dim=0)
        logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1).type_as(x)
        logits = logits / self.logit_temp
        if is_xla_tensor(logits) or neg_is_pos.any():
            # xla cannot hold a python -inf fill; use a large finite value
            fillval = -float(2 ** 30)
            if not hasattr(self, "_inftensor"):
                self._inftensor = (
                    torch.tensor(fillval).to(x.device)
                    if is_xla_tensor(logits)
                    else float("-inf")
                )
            logits[1:] = index_put(logits[1:], neg_is_pos, self._inftensor)
        return logits

    def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
        """
        Computes the output length of the convolutional layers
        """

        def _conv_out_length(input_length, kernel_size, stride):
            # standard conv output-length formula (no padding, no dilation)
            return torch.floor((input_length - kernel_size) / stride + 1)

        conv_cfg_list = eval(self.cfg.conv_feature_layers)
        for i in range(len(conv_cfg_list)):
            input_lengths = _conv_out_length(
                input_lengths, conv_cfg_list[i][1], conv_cfg_list[i][2]
            )
        return input_lengths.to(torch.long)

    def forward(
        self,
        source,
        padding_mask=None,
        positions=None,
        mask=True,
        features_only=False,
        layer=None,
        mask_indices=None,
        mask_channel_indices=None,
        padding_count=None,
        precomputed_tdnn=False,
        quantize=False, quantize_only=False,
        lang=None,
        atb=None,
        checkpointing_ffn=False,
        checkpointing_self_attn=False,
        **kwargs
    ):
        """Full wav2vec 2.0 forward pass.

        With ``features_only=True`` returns encoder features (fine-tuning /
        inference path); otherwise returns the contrastive pre-training
        outputs (logits, penalties, quantizer statistics).
        ``precomputed_tdnn=True`` means ``source`` already holds conv features
        (B x T x C) and the feature extractor is skipped.
        """
        # if the tdnn features are precomputed then skip them
        if not precomputed_tdnn:
            if self.feature_grad_mult > 0 or source.requires_grad:
                features = self.feature_extractor(source)
                if self.feature_grad_mult != 1.0:
                    # scale gradients flowing into the feature extractor
                    features = GradMultiply.apply(features, self.feature_grad_mult)
            else:
                with torch.no_grad():
                    features = self.feature_extractor(source)
            if not features_only:
                # L2 penalty on the conv features (pre-training regularizer)
                features_pen = features.float().pow(2).mean()
            # NOTE(review): features_pen is undefined when precomputed_tdnn=True
            # and features_only=False — that combination would raise NameError.
            # transpose from B x C x T to B x T x C (because conv takes input as B x 1 x T)
            features = features.transpose(1, 2)
        else:
            features = source
        # perform layer norm ... but check grad mode
        current_grad_mode = torch.is_grad_enabled()
        if current_grad_mode:
            # disable grads through layer_norm when its weights are frozen
            torch.set_grad_enabled(self.layer_norm.weight.requires_grad)
        features = self.layer_norm(features)
        torch.set_grad_enabled(current_grad_mode)
        if quantize:
            assert self.quantizer is not None
            with torch.no_grad():
                quantizer_output = self.quantizer.forward_idx(features)
        else:
            quantizer_output = None
        if features_only:
            unmasked_features = None
        else:
            unmasked_features = features.clone()
        if not precomputed_tdnn:  # then compute the padding mask after the TDNN step
            if padding_mask is not None and padding_mask.any():
                input_lengths = (1 - padding_mask.long()).sum(-1)
                # apply conv formula to get real output_lengths
                output_lengths = self._get_feat_extract_output_lengths(input_lengths)
                padding_mask = torch.zeros(
                    features.shape[:2], dtype=features.dtype, device=features.device
                )
                # these two operations makes sure that all values
                # before the output lengths indices are attended to
                padding_mask[
                    (
                        torch.arange(padding_mask.shape[0], device=padding_mask.device),
                        output_lengths - 1,
                    )
                ] = 1
                padding_mask = (1 - padding_mask.flip([-1]).cumsum(-1).flip([-1])).bool()
            else:
                padding_mask = None
        if quantize_only:
            # NOTE(review): assumes quantize=True was also passed; otherwise
            # quantizer_output is None and this unpack fails — confirm callers.
            quantized_x, quantized_target = quantizer_output
            output_dict = dict()
            output_dict['quantized_x'] = quantized_x  # b x t x ?
            output_dict['quantized_target'] = quantized_target  # b x t x num_groups
            output_dict['padding_mask'] = padding_mask
            return output_dict
        if self.post_extract_proj is not None:
            features = self.post_extract_proj(features)
        features = self.dropout_input(features)
        # unmasked_features = self.dropout_features(unmasked_features)
        num_vars = None
        code_ppl = None
        prob_ppl = None
        curr_temp = None
        # if self.input_quantizer:
        #     q = self.input_quantizer(features, produce_targets=False)
        #     features = q["x"]
        #     num_vars = q["num_vars"]
        #     code_ppl = q["code_perplexity"]
        #     prob_ppl = q["prob_perplexity"]
        #     curr_temp = q["temp"]
        #     features = self.project_inp(features)
        if mask:
            x, mask_indices = self.apply_mask(
                features,
                padding_mask,
                mask_indices=mask_indices,
                mask_channel_indices=mask_channel_indices,
            )
            if not is_xla_tensor(x) and mask_indices is not None and not features_only:
                # tpu-comment: reducing the size in a dynamic way causes
                # too many recompilations on xla.
                y = unmasked_features[mask_indices].view(
                    unmasked_features.size(0), -1, unmasked_features.size(-1)
                )
            else:
                y = unmasked_features
        else:
            x = features
            y = unmasked_features
            mask_indices = None
        x, layer_results, pred_lang = self.encoder(x, padding_mask=padding_mask, layer=layer, lang=lang, atb=atb,
                                                   checkpointing_ffn=checkpointing_ffn,
                                                   checkpointing_self_attn=checkpointing_self_attn)
        if features_only:
            output_dict = {
                "x": x,
                "padding_mask": padding_mask,
                "features": unmasked_features,
                "layer_results": layer_results,
                "pred_lang": pred_lang
            }
            if quantize:
                quantized_x, quantized_target = quantizer_output
                output_dict['quantized_x'] = quantized_x  # b x t x ?
                output_dict['quantized_target'] = quantized_target  # b x t x num_groups
            return output_dict
        # ---- pre-training path: build targets (y) and negatives ----
        if self.quantizer:
            q = self.quantizer(y, produce_targets=False)
            y = q["x"]
            num_vars = q["num_vars"]
            code_ppl = q["code_perplexity"]
            prob_ppl = q["prob_perplexity"]
            curr_temp = q["temp"]
            y = self.project_q(y)
            if self.negatives_from_everywhere:
                neg_cands = self.quantizer(unmasked_features, produce_targets=False)[
                    "x"
                ]
                negs, _ = self.sample_negatives(
                    neg_cands,
                    y.size(1),
                    padding_count=padding_count,
                )
                negs = self.project_q(negs)
            else:
                negs, _ = self.sample_negatives(
                    y,
                    y.size(1),
                    padding_count=padding_count,
                )
            if self.codebook_negatives > 0:
                cb_negs = self.quantizer.sample_from_codebook(
                    y.size(0) * y.size(1), self.codebook_negatives
                )
                cb_negs = cb_negs.view(
                    self.codebook_negatives, y.size(0), y.size(1), -1
                )  # order doesnt matter
                cb_negs = self.project_q(cb_negs)
                negs = torch.cat([negs, cb_negs], dim=0)
        else:
            y = self.project_q(y)
            if self.negatives_from_everywhere:
                negs, _ = self.sample_negatives(
                    unmasked_features,
                    y.size(1),
                    padding_count=padding_count,
                )
                negs = self.project_q(negs)
            else:
                negs, _ = self.sample_negatives(
                    y,
                    y.size(1),
                    padding_count=padding_count,
                )
        if not is_xla_tensor(x):
            # tpu-comment: reducing the size in a dynamic way causes
            # too many recompilations on xla.
            x = x[mask_indices].view(x.size(0), -1, x.size(-1))
        if self.target_glu:
            y = self.target_glu(y)
            negs = self.target_glu(negs)
        x = self.final_proj(x)
        x = self.compute_preds(x, y, negs)
        result = {
            "x": x,
            "padding_mask": padding_mask,
            "features_pen": features_pen,
        }
        if prob_ppl is not None:
            result["prob_perplexity"] = prob_ppl
            result["code_perplexity"] = code_ppl
            result["num_vars"] = num_vars
            result["temp"] = curr_temp
        return result

    def quantize(self, x):
        """Run the conv extractor + layer norm and return quantizer indices."""
        assert self.quantizer is not None
        x = self.feature_extractor(x)
        x = x.transpose(1, 2)
        x = self.layer_norm(x)
        return self.quantizer.forward_idx(x)

    def extract_conv_features(self, source, padding_mask):
        """Run only the (frozen) conv feature extractor and derive the
        frame-level padding mask. Returns (features B x T x C, mask as long)."""
        with torch.no_grad():
            features = self.feature_extractor(source)
        # transpose from B x C x T to B x T x C (because conv takes input as B x 1 x T)
        features = features.transpose(1, 2).contiguous()
        if padding_mask is not None and padding_mask.any():
            input_lengths = (1 - padding_mask.long()).sum(-1)
            # apply conv formula to get real output_lengths
            output_lengths = self._get_feat_extract_output_lengths(input_lengths)
            padding_mask = torch.zeros(
                features.shape[:2], dtype=features.dtype, device=features.device
            )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            padding_mask[
                (
                    torch.arange(padding_mask.shape[0], device=padding_mask.device),
                    output_lengths - 1,
                )
            ] = 1
            padding_mask = (1 - padding_mask.flip([-1]).cumsum(-1).flip([-1])).bool()
        else:
            bsz, seq_len = features.size(0), features.size(1)
            padding_mask = features.new(bsz, seq_len).zero_()
        return features, padding_mask.long()

    def extract_features(self, source, padding_mask, mask=False, layer=None, precomputed_tdnn=False,
                         lang=None, atb=None):
        """Convenience wrapper around forward(features_only=True)."""
        res = self.forward(
            source, padding_mask, mask=mask, features_only=True, layer=layer, precomputed_tdnn=precomputed_tdnn,
            lang=lang, atb=atb
        )
        return res

    def get_logits(self, net_output):
        """Flatten contrastive logits to (targets*B*T) x candidates for the loss."""
        logits = net_output["x"]
        logits = logits.transpose(0, 2)
        logits = logits.reshape(-1, logits.size(-1))
        return logits

    def get_targets(self, sample, net_output, expand_steps=True):
        # the positive is always stacked first, so every target index is 0
        x = net_output["x"]
        return x.new_zeros(x.size(1) * x.size(2), dtype=torch.long)

    def get_extra_losses(self, net_output):
        """Auxiliary losses: codebook diversity penalty and feature L2 penalty."""
        pen = []
        if "prob_perplexity" in net_output:
            pen.append(
                (net_output["num_vars"] - net_output["prob_perplexity"])
                / net_output["num_vars"]
            )
        if "features_pen" in net_output:
            pen.append(net_output["features_pen"])
        return pen

    def remove_pretraining_modules(self, removing_quantizer=True):
        """Drop modules only needed for pre-training; optionally keep the quantizer."""
        if removing_quantizer:
            self.quantizer = None
        else:
            print("[INFO] Keeping the quantizer")
            print(self.quantizer)
            # self.groups = groups
            # self.combine_groups = combine_groups
            # self.input_dim = dim
            # self.num_vars = num_vars
            # self.time_first = time_first
            print("Groups: ", self.quantizer.groups)
            print("Combine groups: ", self.quantizer.combine_groups)
            print("num vars: ", self.quantizer.num_vars)
            print(self.quantizer.vars.size())
        self.project_q = None
        self.target_glu = None
        self.final_proj = None

    def add_stacked_encoder(self, stacked_encoder):
        """Append the given encoder's layers to the transformer (capacity increase)."""
        self.encoder.add_stacked_encoder(stacked_encoder)

    def add_relative_attention(self):
        """Enable relative position attention in the encoder."""
        self.relative_attention = True
        self.encoder.add_relative_attention()

    def add_rotary_attention(self):
        """Enable rotary position embeddings in the encoder."""
        self.rotary_attention = True
        self.encoder.add_rotary_attention()

    def convert_fast_attention(self):
        """Convert every MultiheadAttention in the encoder to its fused-QKV form."""
        model = self.encoder

        def find_modules(nn_module, type):
            return [module for module in nn_module.modules() if isinstance(module, type)]

        fast_attentions = find_modules(model, MultiheadAttention)
        for fast_attention in fast_attentions:
            fast_attention.convert_fast_attention()
class ConvFeatureExtractionModel(nn.Module):
    """Stack of 1-d convolution blocks turning a raw waveform (B x T) into
    frame-level features (B x C x T').

    Fix over the original: the layer/group-norm layers inside ``block`` used
    the enclosing loop variable ``dim`` via closure instead of the ``n_out``
    parameter — it only worked because ``dim`` happened to be bound before
    each call. They now use ``n_out`` directly (same values, no fragile
    late-binding).
    """

    def __init__(
        self,
        conv_layers: List[Tuple[int, int, int]],
        dropout: float = 0.0,
        mode: str = "default",
        conv_bias: bool = False,
    ):
        """
        :param conv_layers: list of (dim, kernel_size, stride) triples
        :param dropout: dropout applied after each conv
        :param mode: "default" -> single group norm after the first block;
                     "layer_norm" -> an fp32 layer norm inside every block
        :param conv_bias: include bias terms in the convolutions
        """
        super().__init__()
        assert mode in {"default", "layer_norm"}

        def block(
            n_in,
            n_out,
            k,
            stride,
            is_layer_norm=False,
            is_group_norm=False,
            conv_bias=False,
        ):
            def make_conv():
                conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias)
                nn.init.kaiming_normal_(conv.weight)
                return conv

            assert (
                is_layer_norm and is_group_norm
            ) == False, "layer norm and group norm are exclusive"

            if is_layer_norm:
                return nn.Sequential(
                    make_conv(),
                    nn.Dropout(p=dropout),
                    nn.Sequential(
                        # normalize over channels: transpose to put the channel
                        # dim last, layer-norm in fp32, transpose back
                        TransposeLast(),
                        Fp32LayerNorm(n_out, elementwise_affine=True),
                        TransposeLast(),
                    ),
                    nn.GELU(),
                )
            elif is_group_norm:
                # one group per channel (instance-norm-like), computed in fp32
                return nn.Sequential(
                    make_conv(),
                    nn.Dropout(p=dropout),
                    Fp32GroupNorm(n_out, n_out, affine=True),
                    nn.GELU(),
                )
            else:
                return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU())

        in_d = 1
        self.conv_layers = nn.ModuleList()
        for i, cl in enumerate(conv_layers):
            assert len(cl) == 3, "invalid conv definition: " + str(cl)
            (dim, k, stride) = cl
            self.conv_layers.append(
                block(
                    in_d,
                    dim,
                    k,
                    stride,
                    is_layer_norm=mode == "layer_norm",
                    is_group_norm=mode == "default" and i == 0,
                    conv_bias=conv_bias,
                )
            )
            in_d = dim

    def forward(self, x):
        # BxT -> BxCxT (only for waveforms with 1 channel)
        x = x.unsqueeze(1)
        for conv in self.conv_layers:
            x = conv(x)
        return x
class TransformerEncoder(nn.Module):
def __init__(self, args, favor=False, weight_drop=0.0, predict_language=False, n_languages=1):
    """
    :param args: model config (Wav2Vec2Config-like namespace)
    :param favor: Performer Attention
    :param weight_drop: weight-drop probability for the encoder layers
    :param predict_language: add a per-frame language classification head
    :param n_languages: output size of the language head
    """
    super().__init__()
    self.rotary_attention = False
    self.positional_encoder = None
    self.relative_attention = False
    self.dropout = args.dropout
    self.embedding_dim = args.encoder_embed_dim
    self.favor = favor
    self.weight_drop = weight_drop
    self.num_heads = args.encoder_attention_heads
    self.num_layers = args.encoder_layers
    self.attention_dropout = args.attention_dropout
    self.activation_dropout = args.activation_dropout
    self.deepspeed = False
    # convolutional positional embedding (wav2vec 2.0 style): a wide grouped
    # conv whose output is added to the inputs before the transformer layers
    self.pos_conv = nn.Conv1d(
        self.embedding_dim,
        self.embedding_dim,
        kernel_size=args.conv_pos,
        padding=args.conv_pos // 2,
        groups=args.conv_pos_groups,
    )
    dropout = 0
    std = math.sqrt((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim))
    # weight init must happen BEFORE weight_norm wraps the parameter
    nn.init.normal_(self.pos_conv.weight, mean=0, std=std)
    nn.init.constant_(self.pos_conv.bias, 0)
    self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2)
    self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU())
    self.layers = nn.ModuleList(
        [
            TransformerSentenceEncoderLayer(
                embedding_dim=self.embedding_dim,
                ffn_embedding_dim=args.encoder_ffn_embed_dim,
                num_attention_heads=args.encoder_attention_heads,
                dropout=self.dropout,
                weight_drop=self.weight_drop,
                attention_dropout=args.attention_dropout,
                activation_dropout=args.activation_dropout,
                activation_fn=args.activation_fn,
                layer_norm_first=args.layer_norm_first,
                favor=favor
            )
            for _ in range(args.encoder_layers)
        ]
    )
    self.layer_norm_first = args.layer_norm_first
    self.layer_norm = LayerNorm(self.embedding_dim)
    self.layerdrop = args.encoder_layerdrop
    self.args = args
    # BERT-style init over all submodules (after layer construction)
    self.apply(init_bert_params)
    self.predict_language = predict_language
    if self.predict_language:
        self.layer_norm_cls = LayerNorm(self.embedding_dim)
        self.linear_cls = torch.nn.Linear(self.embedding_dim, n_languages)
    else:
        self.linear_cls = None
        self.layer_norm_cls = None
    # from onmt.modules.optimized.fast_mha import fast_bert_mha
    # self.fast_bert_mha = fast_bert_mha
    from onmt.modules.optimized.flash_mha import flash_bert_mha
    self.fast_bert_mha = flash_bert_mha
    self.using_s4 = False
def replace_attn_with_s4(self, cfg):
self.using_s4 = True
for layer in self.layers:
layer.replace_attn_with_s4(cfg)
# add stacked encoder from mbart encoder (purely parameter increase)
def add_stacked_encoder(self, stacked_encoder):
stacked_layers = stacked_encoder.layers
args = self.args
for old_layer in stacked_layers:
new_layer = TransformerSentenceEncoderLayer(
embedding_dim=self.embedding_dim,
ffn_embedding_dim=args.encoder_ffn_embed_dim,
num_attention_heads=args.encoder_attention_heads,
dropout=self.dropout,
weight_drop=self.weight_drop,
attention_dropout=args.attention_dropout,
activation_dropout=args.activation_dropout,
activation_fn=args.activation_fn,
layer_norm_first=args.layer_norm_first,
favor=self.favor
)
# TODO: check layer norm first between new and old layer
new_layer.load_state_dict(old_layer.state_dict())
self.layers.append(new_layer)
def add_relative_attention(self):
    """Switch every attention module to relative-position attention and
    install a sinusoidal relative-position encoder."""
    self.relative_attention = True

    def _to_relative(submodule):
        # match by class name so MultiheadAttention subclasses are included
        if 'MultiheadAttention' in submodule.__class__.__name__:
            submodule.add_relative_attention()

    self.layers.apply(_to_relative)
    self.positional_encoder = SinusoidalPositionalEmbedding(self.embedding_dim)
def add_rotary_attention(self):
    """Enable rotary position embeddings in every attention module."""
    self.rotary_attention = True

    def _to_rotary(submodule):
        # match by class name so MultiheadAttention subclasses are included
        if 'MultiheadAttention' in submodule.__class__.__name__:
            submodule.add_rotary_attention()

    self.layers.apply(_to_rotary)
    from onmt.modules.rotary_postional_encodings import SinusoidalEmbeddings
    # rotary encodings are applied per attention head
    self.positional_encoder = SinusoidalEmbeddings(self.embedding_dim // self.num_heads)
def forward(self, x, padding_mask=None, positions=None, layer=None, lang=None, atb=None, checkpointing_ffn=False,
checkpointing_self_attn=False, **kwargs):
x, layer_results, pred_lang = self.extract_features(x, padding_mask, positions, layer, lang=lang, atb=atb,
checkpointing_ffn=checkpointing_ffn,
checkpointing_self_attn=checkpointing_self_attn)
if self.layer_norm_first and layer is None:
x = self.layer_norm(x)
return x, layer_results, pred_lang
    def extract_features(self, x, padding_mask=None, positions=None, tgt_layer=None, lang=None, atb=None,
                         checkpointing_ffn=False, checkpointing_self_attn=False):
        """Run the layer stack on ``x`` and collect intermediate results.

        :param x: input features; indexed as (batch, time, dim) on entry
            (see the transpose/size calls below).
        :param padding_mask: mask with 1/True at padded positions.
        :param positions: optional positional information; recomputed here
            when relative or rotary attention is enabled.
        :param tgt_layer: if set, stop after this layer index and return its
            output instead of the final one.
        :param lang: language selector used by factorized weights, or
            replaced by the classifier's soft prediction when
            ``self.predict_language`` is set.
        :param atb: attribute selector passed through to the layers.
        :param checkpointing_ffn: gradient-checkpoint the FFN sub-blocks.
        :param checkpointing_self_attn: gradient-checkpoint self-attention.
        :return: ``(x, layer_results, pred_lang)``.
        """
        # Zero out padded positions before the convolutional position encoder.
        if padding_mask is not None:
            x = index_put(x, padding_mask, 0)

        # Convolutional positional encoding, added residually.
        x_conv = self.pos_conv(x.transpose(1, 2))
        x_conv = x_conv.transpose(1, 2)
        x = x + x_conv

        # Build positional inputs for the attention layers, depending on
        # which position scheme (if any) is enabled.
        if not self.relative_attention and not self.rotary_attention:
            positions = None
        elif self.relative_attention:
            klen = x.size(1)
            bsz = x.size(0)
            # Relative distances from klen-1 down to -(klen-1).
            positions = torch.arange(klen - 1, -klen, -1.0, device=x.device, dtype=x.dtype)
            pos_emb = self.positional_encoder(positions, bsz=bsz)
            pos_emb = F.dropout(pos_emb, p=self.dropout, training=self.training)
            positions = pos_emb
        elif self.rotary_attention:
            positions = self.positional_encoder(x.transpose(0, 1), seq_dim=0)

        # Post-norm models normalise the embeddings once before the stack.
        if not self.layer_norm_first:
            x = self.layer_norm(x)

        x = F.dropout(x, p=self.dropout, training=self.training)

        # check if flash attention can be run
        can_run_fast_bert_mha = False
        seq_len = x.size(1)
        bsz = x.size(0)
        total_bsz = 0

        # fast attention refers to using fused QKV matrix multiplication and T-B-H matrix layout to reduce reshaping cost
        if self.using_s4:
            fast_attention = False
        else:
            fast_attention = self.layers[0].self_attn.fast_attention

        # Flash MHA requires fp16, fused attention, and no relative positions.
        if self.fast_bert_mha and not self.relative_attention and \
                fast_attention and x.dtype == torch.half and not self.using_s4:
            can_run_fast_bert_mha = True
            from onmt.utils import unpad_input

            # masked positions = 1 so to compute length we need the (1 -)
            if padding_mask is None:
                padding_mask = x.new_zeros(bsz, seq_len)
            padding_mask = padding_mask.long()
            lengths = (1 - padding_mask).sum(dim=1)
            lengths = lengths.cpu().tolist()  # list of lengths for B seqs

            # remove paddings from x
            x = x.view(-1, x.size(-1))  # flatten [B x T]
            non_pad_indices = torch.nonzero(padding_mask.view(-1).ne(1)).squeeze(1)
            x = x.index_select(0, non_pad_indices)

            # maybe pad it so the first dim % 8 = 0?
            total_bsz = x.size(0)
            max_len = max(lengths)
            # cumulative sequence lengths (required input for fmha)
            a = torch.tensor(np.array([0] + lengths), dtype=torch.int32)
            cu_seqlens = torch.cumsum(a, 0).to(dtype=torch.int32, device=x.device)
        else:
            # print("[INFO] CanNOT run FAST MHA with seq_len", seq_len)
            max_len = -1
            cu_seqlens = None
            non_pad_indices = None

        # TODO: add classification layer here.
        if self.predict_language:
            # B x T x H ->
            # Language classifier on the (possibly unpadded) features; its
            # softmax replaces the externally supplied language selector.
            pred_lang = self.linear_cls(self.layer_norm_cls(x))
            _lang = torch.nn.functional.softmax(pred_lang, dim=-1, dtype=torch.float32)
        else:
            pred_lang = None
            _lang = lang

        if not self.favor and not can_run_fast_bert_mha:
            # B x T x C -> T x B x C (only for vanilla self-attention and s4)
            x = x.transpose(0, 1)
            x = x.contiguous()

        # forward pass through layers
        layer_results = []
        r = None
        for i, layer in enumerate(self.layers):
            # LayerDrop: stochastically skip whole layers during training.
            dropout_probability = np.random.random()
            if not self.training or (dropout_probability > self.layerdrop):
                x, z = layer(x, self_attn_padding_mask=padding_mask, positions=positions,
                             max_len=max_len, cu_seqlens=cu_seqlens,
                             lang=_lang, atb=atb,
                             checkpointing_ffn=checkpointing_ffn,
                             checkpointing_self_attn=checkpointing_self_attn)
                if tgt_layer is not None:
                    layer_results.append((x, z))
            if i == tgt_layer:
                r = x
                break

        if r is not None:
            x = r

        # if we remove padding before (for fast bert MHA) then remember to put padding back
        # to restore the form B x T X H
        if can_run_fast_bert_mha:
            # remove the patch
            if x.size(0) > total_bsz:
                x = x[:total_bsz, :]
            from onmt.utils import pad_input
            x = index_copy(x, non_pad_indices, bsz * seq_len)
            # transpose [B x T x H] to [T x B x H]
            x = x.view(bsz, seq_len, -1).transpose(0, 1).contiguous()
            if self.predict_language and pred_lang is not None:
                pred_lang = index_copy(pred_lang, non_pad_indices, bsz * seq_len)
                pred_lang = pred_lang.view(bsz, seq_len, -1).transpose(0, 1).contiguous()

        # NOTE(review): this transpose also runs after the flash-MHA branch,
        # where pred_lang was already moved to [T x B x H] above, flipping it
        # back to [B x T x H] while x stays [T x B x H] — confirm intended.
        if pred_lang is not None:
            pred_lang = pred_lang.transpose(0, 1).contiguous()

        return x, layer_results, pred_lang
def max_positions(self):
"""Maximum output length supported by the encoder."""
return self.args.max_positions
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
return state_dict
def add_adapters(self, n_languages, adapter_location=1):
for layer in self.layers:
layer.add_adapters(n_languages, adapter_location=adapter_location)
def add_factorize(self, n_languages, rank=4, multiplicative=False, fast=False, dyrank=False, *kwargs):
for layer in self.layers:
layer.add_factorized(n_languages, rank=rank,
multiplicative=multiplicative, fast=fast, dyrank=dyrank)
def freeze_ffn_params(self):
for layer in self.layers:
for p in layer.fc1.parameters():
p.requires_grad = False
for p in layer.fc2.parameters():
p.requires_grad = False
# noinspection PyAttributeOutsideInit
class TransformerSentenceEncoderLayer(nn.Module):
    """
    Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
    models.
    """

    def __init__(
        self,
        embedding_dim: float = 768,
        ffn_embedding_dim: float = 3072,
        num_attention_heads: float = 8,
        dropout: float = 0.1,
        weight_drop: float = 0.0,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        activation_fn: str = "relu",
        layer_norm_first: bool = False,
        favor=False
    ) -> None:
        """Build one encoder layer.

        :param embedding_dim: model (hidden) dimension.
        :param ffn_embedding_dim: inner dimension of the feed-forward block.
        :param num_attention_heads: number of self-attention heads.
        :param dropout: residual dropout probability.
        :param weight_drop: weight dropout forwarded to MultiheadAttention.
        :param attention_dropout: dropout on attention probabilities.
        :param activation_dropout: dropout after the FFN activation.
        :param activation_fn: name of the FFN activation ('relu'/'gelu'/...).
        :param layer_norm_first: pre-norm (True) vs post-norm (False) layout.
        :param favor: use FAVOR/performer attention in MultiheadAttention.
        """
        super().__init__()
        # Initialize parameters
        self.embedding_dim = embedding_dim
        self.ffn_embedding_dim = ffn_embedding_dim
        self.dropout = dropout
        self.activation_dropout = activation_dropout
        self.favor = favor

        # Feature flags toggled later by add_adapters / add_factorized /
        # replace_attn_with_s4.
        self.has_adapter = False
        self.is_factorized = False
        self.fast_factorize = False
        self.multiplicative_factorize = False
        self.using_s4 = False

        # Initialize blocks
        self.activation_fn = get_activation_fn(activation_fn)
        self.activation_fn_name = activation_fn
        self.self_attn = MultiheadAttention(
            self.embedding_dim,
            num_attention_heads,
            dropout=attention_dropout,
            weight_drop=weight_drop,
            self_attention=True,
            favor=favor
        )

        self.residual_dropout = dropout
        # dropout1/dropout3 guard residual additions (must not be in-place);
        # dropout2 sits between FFN activation and output projection.
        self.dropout1 = nn.Dropout(dropout, inplace=False)
        self.dropout2 = nn.Dropout(self.activation_dropout, inplace=True)
        self.dropout3 = nn.Dropout(dropout, inplace=False)

        self.layer_norm_first = layer_norm_first

        # layer norm associated with the self attention layer
        self.self_attn_layer_norm = LayerNorm(self.embedding_dim)
        self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
        self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)

        # layer norm associated with the position wise feed-forward NN
        self.final_layer_norm = LayerNorm(self.embedding_dim)

        # Try to use the fused (CUDA) MLP kernel matching the activation.
        self.fused = False
        self.fused_function = None
        if self.activation_fn_name == 'relu':
            from onmt.modules.mlp.mlp import mlp_relu_function
            if mlp_relu_function is not None:
                self.fused_function = mlp_relu_function
                self.fused = True
        elif self.activation_fn_name == 'gelu':
            from onmt.modules.mlp.mlp import mlp_gelu_function
            if mlp_gelu_function is not None:
                self.fused_function = mlp_gelu_function
                self.fused = True

    def replace_attn_with_s4(self, s4_cfg):
        """Replace the self-attention module with a (bidirectional) S4 layer."""
        from ..mssm.mhs4 import MHBiS4EncoderLayer
        self.using_s4 = True
        s4_layer = MHBiS4EncoderLayer(s4_cfg, s4_only=True)

        del self.self_attn
        self.self_attn = s4_layer

    def add_adapters(self, n_languages, downsampling_factor=4, adapter_location=1):
        """
        :param n_languages: one adapter per language
        :param downsampling_factor: downsampling rate size for the hidden layer
        :param adapter_location:
        :return:
        """

        self.n_languages = n_languages
        self.has_adapter = True
        self.adapter_location = adapter_location
        from .adapter import MultilingualAdapter
        self.adapter = MultilingualAdapter(n_languages, self.embedding_dim, downsample_factor=downsampling_factor)

        # adapter_location == 2 adds a second adapter between attention and FFN.
        if adapter_location == 2:
            self.mid_adapter = MultilingualAdapter(n_languages, self.embedding_dim,
                                                   downsample_factor=downsampling_factor)

    def add_factorized(self, n_languages, rank=4, multiplicative=True, fast=False, dyrank=False,
                       **kwargs):
        """
        :param sub_factor_rank:
        :param sub_factors:
        :param n_languages: int or list of ints?
        :param rank: number of vectors
        :param multiplicative:
        :param fast:
        :param dyrank
        :return:
        """

        # first, tell the attention modules to add factorize
        self.self_attn.add_factorized_weights(n_languages, rank=rank,
                                              multiplicative=multiplicative, dyrank=dyrank, fast=fast)

        # add factorized for the sub-factors
        self.multiplicative_factorize = multiplicative
        self.is_factorized = True
        self.fast_factorize = fast
        self.dyrank = dyrank

        embed_dim = self.embedding_dim
        ffn_dim = self.ffn_embedding_dim

        if multiplicative:
            # Multiplicative factors: rank-k when fast, rank-1 otherwise;
            # initialised at 1 so they start as identity scaling.
            _rank = rank if fast else 1
            self.rm_i = torch.nn.Parameter(torch.Tensor(n_languages, _rank, self.ffn_embedding_dim))
            self.sm_i = torch.nn.Parameter(torch.Tensor(n_languages, _rank, self.embedding_dim))
            self.rm_o = torch.nn.Parameter(torch.Tensor(n_languages, _rank, self.embedding_dim))
            self.sm_o = torch.nn.Parameter(torch.Tensor(n_languages, _rank, self.ffn_embedding_dim))

            constant = 1
            nn.init.constant_(self.rm_i, constant)
            nn.init.constant_(self.sm_i, constant)
            nn.init.constant_(self.rm_o, constant)
            nn.init.constant_(self.sm_o, constant)

        # These parameters are NOT USED with fast factorize
        self.r_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.ffn_embedding_dim))
        self.s_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.embedding_dim))
        self.r_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.embedding_dim))
        self.s_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.ffn_embedding_dim))

        if self.dyrank:
            # Dynamic rank: zero one side so the additive factor starts at 0.
            nn.init.zeros_(self.r_i)
            nn.init.normal_(self.s_i, 0.0, 0.02)
            nn.init.zeros_(self.r_o)
            nn.init.normal_(self.s_o, 0.0, 0.02)
        else:
            nn.init.normal_(self.r_i, 0.0, 0.02)
            nn.init.normal_(self.s_i, 0.0, 0.02)
            nn.init.normal_(self.r_o, 0.0, 0.02)
            nn.init.normal_(self.s_o, 0.0, 0.02)

    def get_mlp_weights(self, lang=None, atb=None):
        """Return the FFN weights, optionally adapted for ``lang``.

        With factorization enabled, multiplies in the multiplicative factors
        and adds the low-rank additive factors for the selected language.
        :return: ``(in_weight, out_weight, in_bias, out_bias)``.
        """
        in_weight = self.fc1.weight
        out_weight = self.fc2.weight
        in_bias = self.fc1.bias
        out_bias = self.fc2.bias

        if lang is not None:
            if self.is_factorized:
                # First check if we use multiplicative
                if self.multiplicative_factorize:
                    rm_i = torch.index_select(self.rm_i, 0, lang).squeeze(0)  # squeeze possible because only 1
                    sm_i = torch.index_select(self.sm_i, 0, lang).squeeze(0)
                    rm_o = torch.index_select(self.rm_o, 0, lang).squeeze(0)
                    sm_o = torch.index_select(self.sm_o, 0, lang).squeeze(0)

                    if self.fast_factorize:
                        mul_factor_in = torch.mm(rm_i.t(), sm_i)
                        mul_factor_out = torch.mm(rm_o.t(), sm_o)
                    else:
                        mul_factor_in = torch.bmm(rm_i.unsqueeze(-1), sm_i.unsqueeze(1)).sum(dim=0)
                        mul_factor_out = torch.bmm(rm_o.unsqueeze(-1), sm_o.unsqueeze(1)).sum(dim=0)

                    in_weight = in_weight * mul_factor_in
                    out_weight = out_weight * mul_factor_out

                # For addictive
                r_i = torch.index_select(self.r_i, 0, lang).squeeze(0)
                s_i = torch.index_select(self.s_i, 0, lang).squeeze(0)
                r_o = torch.index_select(self.r_o, 0, lang).squeeze(0)
                s_o = torch.index_select(self.s_o, 0, lang).squeeze(0)

                if self.fast_factorize or self.dyrank:
                    add_factor_in = torch.mm(r_i.t(), s_i)
                    add_factor_out = torch.mm(r_o.t(), s_o)
                else:
                    add_factor_in = torch.bmm(r_i.unsqueeze(-1), s_i.unsqueeze(1)).sum(dim=0)
                    add_factor_out = torch.bmm(r_o.unsqueeze(-1), s_o.unsqueeze(1)).sum(dim=0)

                in_weight = in_weight + add_factor_in
                out_weight = out_weight + add_factor_out

        return in_weight, out_weight, in_bias, out_bias

    def call_self_attn(self, x, self_attn_padding_mask=None, positions=None, attn_mask=None,
                       max_len=None, cu_seqlens=None, lang=None, atb=None, checkpointing=False):
        """Dispatch to self-attention or, when enabled, the S4 replacement.

        :return: ``(x, attn)``; ``attn`` is None on the S4 path.
        """
        if not self.using_s4:
            x, attn = self.self_attn(
                query=x,
                key=x,
                value=x,
                key_padding_mask=self_attn_padding_mask,
                positions=positions,
                attn_mask=attn_mask,  # this probably doesn't do anything
                max_len=max_len, cu_seqlens=cu_seqlens,
                lang=lang, atb=atb,
                checkpointing=checkpointing
            )

            return x, attn

        # In s4 case:
        x = self.self_attn(x, self_attn_padding_mask)

        return x, None

    def call_factorize_mlp(self, x, lang, activation_fn, dropout_p, training_):
        """FFN forward using fused factorized linears (fast_factorize path).

        ``lang`` selects the factors: 1-D = language indices, 2-D = soft
        per-token weights (flash-attention layout), 3-D = soft weights with
        an explicit (time, batch) layout.
        """
        in_weight = self.fc1.weight
        out_weight = self.fc2.weight
        in_bias = self.fc1.bias
        out_bias = self.fc2.bias

        n_languages, _rank = self.rm_i.size(0), self.rm_i.size(1)

        # TODO: mm instead of index select for multiple code
        if lang.ndim == 1:
            rm_i = torch.index_select(self.rm_i, 0, lang).squeeze(0)  # squeeze possible because only 1
            sm_i = torch.index_select(self.sm_i, 0, lang).squeeze(0)
            rm_o = torch.index_select(self.rm_o, 0, lang).squeeze(0)
            sm_o = torch.index_select(self.sm_o, 0, lang).squeeze(0)
        elif lang.ndim == 2:   # for flash attention
            rm_i = torch.mm(lang, self.rm_i.view(n_languages, _rank * self.rm_i.size(-1))).view(lang.size(0), _rank,
                                                                                                self.rm_i.size(-1))
            sm_i = torch.mm(lang, self.sm_i.view(n_languages, _rank * self.sm_i.size(-1))).view(lang.size(0), _rank,
                                                                                                self.sm_i.size(-1))
            rm_o = torch.mm(lang, self.rm_o.view(n_languages, _rank * self.rm_o.size(-1))).view(lang.size(0), _rank,
                                                                                                self.rm_o.size(-1))
            sm_o = torch.mm(lang, self.sm_o.view(n_languages, _rank * self.sm_o.size(-1))).view(lang.size(0), _rank,
                                                                                                self.sm_o.size(-1))
        elif lang.ndim == 3:
            _len, _bsz = lang.size(0), lang.size(1)
            _lang = lang.view(_len * _bsz, lang.size(-1))
            rm_i = torch.mm(_lang, self.rm_i.view(n_languages, _rank * self.rm_i.size(-1))).view(
                _len, _bsz, _rank, self.rm_i.size(-1))
            sm_i = torch.mm(_lang, self.sm_i.view(n_languages, _rank * self.sm_i.size(-1))).view(
                _len, _bsz, _rank, self.sm_i.size(-1))
            rm_o = torch.mm(_lang, self.rm_o.view(n_languages, _rank * self.rm_o.size(-1))).view(
                _len, _bsz, _rank, self.rm_o.size(-1))
            sm_o = torch.mm(_lang, self.sm_o.view(n_languages, _rank * self.sm_o.size(-1))).view(
                _len, _bsz, _rank, self.sm_o.size(-1))

        x = factorize_linear(x, in_weight, in_bias, rm_i, sm_i)
        x = activation_fn(x)
        x = F.dropout(x, dropout_p, training=training_)

        x = factorize_linear(x, out_weight, out_bias, rm_o, sm_o)

        return x

    def forward(
        self,
        x: torch.Tensor,
        self_attn_mask: torch.Tensor = None,
        self_attn_padding_mask: torch.Tensor = None,
        positions=None,
        max_len=-1, cu_seqlens=None,
        lang=None, atb=None,
        checkpointing_ffn=False,
        checkpointing_self_attn=False,
        **kwargs
    ):
        """
        LayerNorm is applied either before or after the self-attention/ffn
        modules similar to the original Transformer imlementation.
        """
        residual = x
        # is_fast = self.self_attn.fast_attention
        is_fast = False

        def call_mlp(x, in_weight, out_weight, in_bias, out_bias, activation_fn, dropout_p, training_,
                     fused, fused_function, checkpointing):
            """Run the FFN: fused CUDA kernel when available, else eager ops."""
            # TODO: check type x torch.half or torch.float32
            if fused and x.is_cuda:
                dropout_p_ = dropout_p if training_ else 0.0
                weights = [in_weight, out_weight]
                biases = [in_bias, out_bias]
                x = fused_function(dropout_p_, checkpointing, x, *weights, *biases)
            else:
                x = F.linear(x, in_weight, in_bias)
                x = activation_fn(x)
                x = F.dropout(x, dropout_p, training=training_)
                x = F.linear(x, out_weight, out_bias)
            return x

        if self.has_adapter:
            if self.adapter_location == 1:
                assert lang is not None
                x = self.adapter(x, lang=lang)
                x.add_(residual)  # residual is before the big FFN
                residual = x

        if self.layer_norm_first:
            # Pre-norm layout: norm -> attention -> residual -> norm -> FFN.
            x = self.self_attn_layer_norm(x)

            # SELF ATTENTION
            x, attn = self.call_self_attn(
                x,
                self_attn_padding_mask=self_attn_padding_mask,
                positions=positions,
                attn_mask=self_attn_mask,
                max_len=max_len, cu_seqlens=cu_seqlens,
                lang=lang, atb=atb,
                checkpointing=checkpointing_self_attn
            )

            x = self.dropout1(x) + residual
            residual = x

            # MLP
            x = self.final_layer_norm(x)

            if self.fast_factorize:
                x = self.call_factorize_mlp(x, lang, self.activation_fn,
                                            self.dropout2.p,
                                            self.training)
            else:
                in_weight, out_weight, in_bias, out_bias = self.get_mlp_weights(lang=lang, atb=atb)
                x = call_mlp(x, in_weight, out_weight, in_bias, out_bias, self.activation_fn,
                             self.dropout2.p, self.training,
                             self.fused, self.fused_function, checkpointing_ffn)

            x = self.dropout3(x) + residual

            return x, attn
        else:
            # THE BELOW CODE HAS NEVER BEEN RUN AND TESTED
            x, attn = self.self_attn(
                query=x,
                key=x,
                value=x,
                key_padding_mask=self_attn_padding_mask,
            )
            # x = self.dropout1(x)
            x = residual + x
            x = self.self_attn_layer_norm(x)

            residual = x
            in_weight, out_weight, in_bias, out_bias = self.get_mlp_weights(lang=lang, atb=atb)
            x = call_mlp(x, in_weight, out_weight, in_bias, out_bias, self.activation_fn,
                         self.dropout2.p, self.training,
                         self.fused, self.fused_function, checkpointing_ffn)
            x = self.dropout3(x)
            x = residual + x
            x = self.final_layer_norm(x)

            return x, attn
| 63,460 | 36.440118 | 121 | py |
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/fairseq_wav2vec2/wavlm.py | import math
import logging
from typing import List, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.modules.layer_norm import LayerNorm
from .fairseq_modules import (
Fp32GroupNorm,
Fp32LayerNorm,
GradMultiply,
SamePad,
TransposeLast,
)
from .wavlm_modules import (
WavLMMultiheadAttention,
get_activation_fn,
init_bert_params,
GLU_Linear
)
def compute_mask_indices(
    shape: Tuple[int, int],
    padding_mask: Optional[torch.Tensor],
    mask_prob: float,
    mask_length: int,
    mask_type: str = "static",
    mask_other: float = 0.0,
    min_masks: int = 0,
    no_overlap: bool = False,
    min_space: int = 0,
) -> np.ndarray:
    """
    Computes random mask spans for a given shape

    Args:
        shape: the the shape for which to compute masks.
            should be of size 2 where first element is batch size and 2nd is timesteps
        padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
        mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by
            number of timesteps divided by length of mask span to mask approximately this percentage of all elements.
            however due to overlaps, the actual number will be smaller (unless no_overlap is True)
        mask_type: how to compute mask lengths
            static = fixed size
            uniform = sample from uniform distribution [mask_other, mask_length*2]
            normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element
            poisson = sample from possion distribution with lambda = mask length
        min_masks: minimum number of masked spans
        no_overlap: if false, will switch to an alternative recursive algorithm that prevents spans from overlapping
        min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans

    Returns:
        boolean ndarray of shape ``shape`` with True at masked positions.
    """

    bsz, all_sz = shape
    mask = np.full((bsz, all_sz), False)

    all_num_mask = int(
        # add a random number for probabilistic rounding
        mask_prob * all_sz / float(mask_length)
        + np.random.rand()
    )

    all_num_mask = max(min_masks, all_num_mask)

    mask_idcs = []
    for i in range(bsz):
        if padding_mask is not None:
            # Do not mask padded timesteps: recompute the budget from the
            # true (unpadded) length of this sequence.
            sz = all_sz - padding_mask[i].long().sum().item()
            num_mask = int(
                # add a random number for probabilistic rounding
                mask_prob * sz / float(mask_length)
                + np.random.rand()
            )
            num_mask = max(min_masks, num_mask)
        else:
            sz = all_sz
            num_mask = all_num_mask

        if mask_type == "static":
            lengths = np.full(num_mask, mask_length)
        elif mask_type == "uniform":
            lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask)
        elif mask_type == "normal":
            lengths = np.random.normal(mask_length, mask_other, size=num_mask)
            lengths = [max(1, int(round(x))) for x in lengths]
        elif mask_type == "poisson":
            lengths = np.random.poisson(mask_length, size=num_mask)
            lengths = [int(round(x)) for x in lengths]
        else:
            raise Exception("unknown mask selection " + mask_type)

        if sum(lengths) == 0:
            lengths[0] = min(mask_length, sz - 1)

        if no_overlap:
            mask_idc = []

            def arrange(s, e, length, keep_length):
                # Place one span inside [s, e) and return the remaining
                # sub-intervals that are still large enough to host a span.
                span_start = np.random.randint(s, e - length)
                mask_idc.extend(span_start + i for i in range(length))

                new_parts = []
                if span_start - s - min_space >= keep_length:
                    new_parts.append((s, span_start - min_space + 1))
                if e - span_start - keep_length - min_space > keep_length:
                    new_parts.append((span_start + length + min_space, e))
                return new_parts

            parts = [(0, sz)]
            min_length = min(lengths)
            for length in sorted(lengths, reverse=True):
                lens = np.fromiter(
                    (e - s if e - s >= length + min_space else 0 for s, e in parts),
                    # Bug fix: was ``np.int``, an alias removed in NumPy 1.24,
                    # which made the no_overlap path crash on modern NumPy.
                    int,
                )
                l_sum = np.sum(lens)
                if l_sum == 0:
                    break
                probs = lens / np.sum(lens)
                c = np.random.choice(len(parts), p=probs)
                s, e = parts.pop(c)
                parts.extend(arrange(s, e, length, min_length))
            mask_idc = np.asarray(mask_idc)
        else:
            min_len = min(lengths)
            if sz - min_len <= num_mask:
                min_len = sz - num_mask - 1

            mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)

            mask_idc = np.asarray(
                [
                    mask_idc[j] + offset
                    for j in range(len(mask_idc))
                    for offset in range(lengths[j])
                ]
            )

        mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))

    # Truncate every row to the same number of masked positions so batches
    # stay rectangular.
    min_len = min([len(m) for m in mask_idcs])
    for i, mask_idc in enumerate(mask_idcs):
        if len(mask_idc) > min_len:
            mask_idc = np.random.choice(mask_idc, min_len, replace=False)
        mask[i, mask_idc] = True

    return mask
class WavLMConfig:
    """Hyper-parameter container for WavLM.

    All settings are plain instance attributes holding the base-model
    defaults; any subset can be overridden by passing a ``cfg`` dict at
    construction time or by calling :meth:`update` later.
    """

    def __init__(self, cfg=None):
        defaults = {
            # feature extractor: "default" puts a single group norm (d groups)
            # in the first conv block; "layer_norm" uses a layer norm in every
            # block (meant to be used together with normalize=True)
            "extractor_mode": "default",
            "conv_feature_layers": "[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2",  # [(dim, kernel, stride), ...]
            "conv_bias": False,                 # include bias in conv encoder
            "feature_grad_mult": 1.0,           # scale on feature extractor grads
            "normalize": False,                 # zero-mean/unit-variance input

            # transformer
            "encoder_layers": 12,
            "encoder_embed_dim": 768,
            "encoder_ffn_embed_dim": 3072,
            "encoder_attention_heads": 12,
            "activation_fn": "gelu",
            "layer_norm_first": False,          # pre-norm vs post-norm

            # dropouts
            "dropout": 0.1,                     # transformer dropout
            "attention_dropout": 0.1,           # attention-weight dropout
            "activation_dropout": 0.0,          # dropout after FFN activation
            "encoder_layerdrop": 0.0,           # probability of dropping a layer
            "dropout_input": 0.0,               # after feature extraction
            "dropout_features": 0.0,            # on the extracted features

            # time masking (see compute_mask_indices for the semantics)
            "mask_length": 10,
            "mask_prob": 0.65,
            "mask_selection": "static",
            "mask_other": 0,
            "no_mask_overlap": False,
            "mask_min_space": 1,

            # channel masking
            "mask_channel_length": 10,
            "mask_channel_prob": 0.0,
            "mask_channel_selection": "static",
            "mask_channel_other": 0,
            "no_mask_channel_overlap": False,
            "mask_channel_min_space": 1,

            # convolutional positional embeddings
            "conv_pos": 128,                    # number of filters
            "conv_pos_groups": 16,              # conv groups

            # relative position embedding
            "relative_position_embedding": False,
            "num_buckets": 320,
            "max_distance": 1280,
            "gru_rel_pos": False,               # gated relative position embedding
        }
        self.__dict__.update(defaults)

        if cfg is not None:
            self.update(cfg)

    def update(self, cfg: dict):
        """Override attributes in place from a plain dict."""
        self.__dict__.update(cfg)
class WavLM(nn.Module):
    """WavLM acoustic model: a convolutional feature extractor followed by a
    Transformer encoder, with SpecAugment-style time/channel masking.
    """

    def __init__(
        self,
        cfg: WavLMConfig,
    ) -> None:
        """Build the model from a :class:`WavLMConfig`."""
        super().__init__()
        # logger.info(f"WavLM Config: {cfg.__dict__}")

        self.cfg = cfg
        # NOTE: eval() on the config string — safe only for trusted configs.
        feature_enc_layers = eval(cfg.conv_feature_layers)
        self.embed = feature_enc_layers[-1][0]

        self.feature_extractor = ConvFeatureExtractionModel(
            conv_layers=feature_enc_layers,
            dropout=0.0,
            mode=cfg.extractor_mode,
            conv_bias=cfg.conv_bias,
        )

        # Project conv features to the transformer width when they differ.
        self.post_extract_proj = (
            nn.Linear(self.embed, cfg.encoder_embed_dim)
            if self.embed != cfg.encoder_embed_dim
            else None
        )

        # Time-masking hyper-parameters (see compute_mask_indices).
        self.mask_prob = cfg.mask_prob
        self.mask_selection = cfg.mask_selection
        self.mask_other = cfg.mask_other
        self.mask_length = cfg.mask_length
        self.no_mask_overlap = cfg.no_mask_overlap
        self.mask_min_space = cfg.mask_min_space

        # Channel-masking hyper-parameters.
        self.mask_channel_prob = cfg.mask_channel_prob
        self.mask_channel_selection = cfg.mask_channel_selection
        self.mask_channel_other = cfg.mask_channel_other
        self.mask_channel_length = cfg.mask_channel_length
        self.no_mask_channel_overlap = cfg.no_mask_channel_overlap
        self.mask_channel_min_space = cfg.mask_channel_min_space

        self.dropout_input = nn.Dropout(cfg.dropout_input)
        self.dropout_features = nn.Dropout(cfg.dropout_features)

        self.feature_grad_mult = cfg.feature_grad_mult

        # Learned embedding written into masked time steps.
        self.mask_emb = nn.Parameter(
            torch.FloatTensor(cfg.encoder_embed_dim).uniform_()
        )

        self.encoder = TransformerEncoder(cfg)
        self.layer_norm = LayerNorm(self.embed)

        self.length_adapter = None
    #
    # def create_length_adapter(self):
    #     from .length_adapter import LengthAdapter
    #     self.length_adapter = LengthAdapter(self.cfg.encoder_embed_dim, self.cfg.encoder_embed_dim)

    def apply_mask(self, x, padding_mask):
        """Apply time and channel masking to ``x`` (modified in place).

        :param x: features of shape (B, T, C).
        :param padding_mask: optional (B, T) mask of padded positions.
        :return: ``(x, mask_indices)`` where ``mask_indices`` is the boolean
            time mask or None when ``mask_prob`` is 0.
        """
        B, T, C = x.shape
        if self.mask_prob > 0:
            mask_indices = compute_mask_indices(
                (B, T),
                padding_mask,
                self.mask_prob,
                self.mask_length,
                self.mask_selection,
                self.mask_other,
                min_masks=2,
                no_overlap=self.no_mask_overlap,
                min_space=self.mask_min_space,
            )
            mask_indices = torch.from_numpy(mask_indices).to(x.device)
            # Masked time steps are replaced by the learned mask embedding.
            x[mask_indices] = self.mask_emb
        else:
            mask_indices = None

        if self.mask_channel_prob > 0:
            mask_channel_indices = compute_mask_indices(
                (B, C),
                None,
                self.mask_channel_prob,
                self.mask_channel_length,
                self.mask_channel_selection,
                self.mask_channel_other,
                no_overlap=self.no_mask_channel_overlap,
                min_space=self.mask_channel_min_space,
            )
            mask_channel_indices = (
                torch.from_numpy(mask_channel_indices)
                .to(x.device)
                .unsqueeze(1)
                .expand(-1, T, -1)
            )
            # Masked channels are zeroed across all time steps.
            x[mask_channel_indices] = 0

        return x, mask_indices

    def forward_padding_mask(
        self, features: torch.Tensor, padding_mask: torch.Tensor,
    ) -> torch.Tensor:
        """Downsample a sample-rate padding mask to the feature frame rate.

        A frame is considered padded only if all samples it covers are padded.
        """
        extra = padding_mask.size(1) % features.size(1)
        if extra > 0:
            padding_mask = padding_mask[:, :-extra]
        padding_mask = padding_mask.view(
            padding_mask.size(0), features.size(1), -1
        )
        padding_mask = padding_mask.all(-1)
        return padding_mask

    def extract_features(
        self,
        source: torch.Tensor,
        padding_mask: Optional[torch.Tensor] = None,
        mask: bool = False,
        ret_conv: bool = False,
        output_layer: Optional[int] = None,
        ret_layer_results: bool = False,
    ):
        """Feature-extraction entry point (conv extractor always no-grad here).

        :param source: raw waveform batch (B, T_samples).
        :param mask: apply time/channel masking before the encoder.
        :param ret_conv: return the conv features instead of encoder output.
        :param output_layer: 1-based encoder layer whose output to return.
        :param ret_layer_results: also return per-layer results.
        :return: ``(feature, padding_mask)``.
        """
        #
        # if self.feature_grad_mult > 0:
        #     features = self.feature_extractor(source)
        #     if self.feature_grad_mult != 1.0:
        #         features = GradMultiply.apply(features, self.feature_grad_mult)
        # else:
        with torch.no_grad():
            features = self.feature_extractor(source)

        features = features.transpose(1, 2)
        features = self.layer_norm(features)

        if padding_mask is not None:
            padding_mask = self.forward_padding_mask(features, padding_mask)

        if self.post_extract_proj is not None:
            features = self.post_extract_proj(features)

        features = self.dropout_input(features)

        if mask:
            x, mask_indices = self.apply_mask(
                features, padding_mask
            )
        else:
            x = features

        # feature: (B, T, D), float
        # target: (B, T), long
        # x: (B, T, D), float
        # padding_mask: (B, T), bool
        # mask_indices: (B, T), bool
        x, layer_results = self.encoder(
            x,
            padding_mask=padding_mask,
            layer=None if output_layer is None else output_layer - 1
        )

        res = {"x": x, "padding_mask": padding_mask, "features": features, "layer_results": layer_results}

        feature = res["features"] if ret_conv else res["x"]
        if ret_layer_results:
            feature = (feature, res["layer_results"])
        return feature, res["padding_mask"]

    def forward(
        self,
        source,
        padding_mask=None,
        positions=None,
        mask=True,
        features_only=False,
        layer=None,
        mask_indices=None,
        mask_channel_indices=None,
        padding_count=None,
        precomputed_tdnn=False,
        quantize=False, quantize_only=False,
        lang=None,
        atb=None,
        checkpointing_ffn=False,
        checkpointing_self_attn=False,
        **kwargs
    ):
        """Full forward pass: conv features -> (masking) -> encoder.

        Unlike :meth:`extract_features`, gradients flow into the conv
        extractor when ``feature_grad_mult > 0`` (optionally rescaled).
        Several parameters (positions, quantize, lang, atb, ...) are accepted
        for interface compatibility but not used in this implementation.

        :return: dict with keys ``x``, ``padding_mask``, ``features`` and
            ``layer_results``.
        """
        if self.feature_grad_mult > 0:
            features = self.feature_extractor(source)
            if self.feature_grad_mult != 1.0:
                features = GradMultiply.apply(features, self.feature_grad_mult)
        else:
            with torch.no_grad():
                features = self.feature_extractor(source)

        features = features.transpose(1, 2)
        features = self.layer_norm(features)

        if padding_mask is not None:
            padding_mask = self.forward_padding_mask(features, padding_mask)

        if self.post_extract_proj is not None:
            features = self.post_extract_proj(features)

        features = self.dropout_input(features)

        if mask:
            x, mask_indices = self.apply_mask(
                features, padding_mask
            )
        else:
            x = features

        x, layer_results = self.encoder(
            x,
            padding_mask=padding_mask,
            layer=layer
        )
        #
        # if self.length_adapter is not None:
        #     x = self.length_adapter(x)
        #     if padding_mask is not None:
        #         padding_mask = padding_mask[:, 2::2][:, 2::2][:, 2::2]

        res = {"x": x, "padding_mask": padding_mask, "features": features, "layer_results": layer_results}

        return res
class ConvFeatureExtractionModel(nn.Module):
    """Convolutional front-end turning raw input into frame-level features.

    :param conv_layers: list of ``(dim, kernel_size, stride)`` triples.
    :param dropout: dropout after each conv (default conv_type only).
    :param mode: "default" (group norm on the first block only) or
        "layer_norm" (a layer norm inside every block).
    :param conv_bias: include a bias term in the convolutions.
    :param conv_type: "default" (1-D convs), "conv2d" or "custom" layouts.
    """

    def __init__(
        self,
        conv_layers: List[Tuple[int, int, int]],
        dropout: float = 0.0,
        mode: str = "default",
        conv_bias: bool = False,
        conv_type: str = "default"
    ):
        super().__init__()

        assert mode in {"default", "layer_norm"}

        def block(
            n_in,
            n_out,
            k,
            stride,
            is_layer_norm=False,
            is_group_norm=False,
            conv_bias=False,
        ):
            # Build one conv block: conv -> dropout -> (norm) -> GELU.
            def make_conv():
                conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias)
                nn.init.kaiming_normal_(conv.weight)
                return conv

            assert (
                is_layer_norm and is_group_norm
            ) == False, "layer norm and group norm are exclusive"

            # Bug fix: the norm layers previously referenced the enclosing
            # loop variable ``dim`` instead of this function's ``n_out``
            # parameter — correct only by accident of call order.
            if is_layer_norm:
                return nn.Sequential(
                    make_conv(),
                    nn.Dropout(p=dropout),
                    nn.Sequential(
                        TransposeLast(),
                        Fp32LayerNorm(n_out, elementwise_affine=True),
                        TransposeLast(),
                    ),
                    nn.GELU(),
                )
            elif is_group_norm:
                return nn.Sequential(
                    make_conv(),
                    nn.Dropout(p=dropout),
                    Fp32GroupNorm(n_out, n_out, affine=True),
                    nn.GELU(),
                )
            else:
                return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU())

        self.conv_type = conv_type
        if self.conv_type == "default":
            in_d = 1
            self.conv_layers = nn.ModuleList()
            for i, cl in enumerate(conv_layers):
                assert len(cl) == 3, "invalid conv definition: " + str(cl)
                (dim, k, stride) = cl

                self.conv_layers.append(
                    block(
                        in_d,
                        dim,
                        k,
                        stride,
                        is_layer_norm=mode == "layer_norm",
                        is_group_norm=mode == "default" and i == 0,
                        conv_bias=conv_bias,
                    )
                )
                in_d = dim
        elif self.conv_type == "conv2d":
            in_d = 1
            self.conv_layers = nn.ModuleList()
            for i, cl in enumerate(conv_layers):
                assert len(cl) == 3
                (dim, k, stride) = cl

                self.conv_layers.append(
                    torch.nn.Conv2d(in_d, dim, k, stride)
                )
                self.conv_layers.append(torch.nn.ReLU())
                in_d = dim
        elif self.conv_type == "custom":
            in_d = 1
            idim = 80
            self.conv_layers = nn.ModuleList()
            for i, cl in enumerate(conv_layers):
                assert len(cl) == 3
                (dim, k, stride) = cl
                self.conv_layers.append(
                    torch.nn.Conv2d(in_d, dim, k, stride, padding=1)
                )
                self.conv_layers.append(
                    torch.nn.LayerNorm([dim, idim])
                )
                self.conv_layers.append(torch.nn.ReLU())
                in_d = dim
                # Halve the frequency axis after every second conv block.
                if (i + 1) % 2 == 0:
                    self.conv_layers.append(
                        torch.nn.MaxPool2d(2, stride=2, ceil_mode=True)
                    )
                    idim = int(math.ceil(idim / 2))
        else:
            pass

    def forward(self, x, mask=None):
        """Run the conv stack; output indexed as (B, C, T).

        :param x: (B, T) raw input for 1-D convs, or (B, T, F) for the 2-D
            conv types (a channel dim is inserted below either way).
        """

        # BxT -> BxCxT
        x = x.unsqueeze(1)
        if self.conv_type == "custom":
            for conv in self.conv_layers:
                if isinstance(conv, nn.LayerNorm):
                    x = x.transpose(1, 2)
                    x = conv(x).transpose(1, 2)
                else:
                    x = conv(x)
            x = x.transpose(2, 3).contiguous()
            x = x.view(x.size(0), -1, x.size(-1))
        else:
            for conv in self.conv_layers:
                x = conv(x)
            if self.conv_type == "conv2d":
                # Fold the frequency axis into the channel axis.
                b, c, t, f = x.size()
                x = x.transpose(2, 3).contiguous().view(b, c * f, t)
        return x
class TransformerEncoder(nn.Module):
    """Stack of WavLM transformer layers with a convolutional positional
    embedding prepended (wav2vec 2.0 style)."""

    def __init__(self, args):
        super().__init__()

        # dropout applied to the summed (input + positional) features
        self.dropout = args.dropout
        self.embedding_dim = args.encoder_embed_dim

        # convolutional positional embedding: a grouped conv whose output
        # is added to the input features before the layer stack
        self.pos_conv = nn.Conv1d(
            self.embedding_dim,
            self.embedding_dim,
            kernel_size=args.conv_pos,
            padding=args.conv_pos // 2,
            groups=args.conv_pos_groups,
        )
        dropout = 0
        # init std follows the fairseq recipe (scaled by kernel size and width)
        std = math.sqrt((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim))
        nn.init.normal_(self.pos_conv.weight, mean=0, std=std)
        nn.init.constant_(self.pos_conv.bias, 0)

        # weight-norm over the kernel dimension (dim=2) — fairseq convention
        self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2)
        # SamePad trims the extra frame produced by an even kernel size
        self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU())

        # WavLM-style relative position bias configuration (off by default)
        if hasattr(args, "relative_position_embedding"):
            self.relative_position_embedding = args.relative_position_embedding
            self.num_buckets = args.num_buckets
            self.max_distance = args.max_distance
        else:
            self.relative_position_embedding = False
            self.num_buckets = 0
            self.max_distance = 0

        # only layer 0 owns the relative attention bias; later layers reuse
        # the bias tensor threaded through the forward pass (pos_bias)
        self.layers = nn.ModuleList(
            [
                WavLMSentenceEncoderLayer(
                    embedding_dim=self.embedding_dim,
                    ffn_embedding_dim=args.encoder_ffn_embed_dim,
                    num_attention_heads=args.encoder_attention_heads,
                    dropout=self.dropout,
                    attention_dropout=args.attention_dropout,
                    activation_dropout=args.activation_dropout,
                    activation_fn=args.activation_fn,
                    layer_norm_first=args.layer_norm_first,
                    has_relative_attention_bias=(self.relative_position_embedding and i == 0),
                    num_buckets=self.num_buckets,
                    max_distance=self.max_distance,
                    gru_rel_pos=args.gru_rel_pos,
                )
                for i in range(args.encoder_layers)
            ]
        )

        self.layer_norm_first = args.layer_norm_first
        self.layer_norm = LayerNorm(self.embedding_dim)
        self.layerdrop = args.encoder_layerdrop

        self.apply(init_bert_params)

    def forward(self, x, padding_mask=None, streaming_mask=None, layer=None):
        """Encode ``x``; returns (features, layer_results).

        The features come back in T x B x C layout (see extract_features,
        which transposes internally and does not transpose back).
        """
        x, layer_results = self.extract_features(x, padding_mask, streaming_mask, layer)

        # pre-LN stacks leave the final output unnormalized; only apply the
        # top layer norm when the full stack was run (layer is None)
        if self.layer_norm_first and layer is None:
            x = self.layer_norm(x)

        # Length adapter here?
        return x, layer_results

    def extract_features(self, x, padding_mask=None, streaming_mask=None, tgt_layer=None):
        """Add the conv positional embedding and run the layer stack.

        Input is B x T x C; output is T x B x C. If ``tgt_layer`` is given,
        processing stops after that layer and per-layer (x, attn) pairs are
        collected in ``layer_results``.
        """
        if padding_mask is not None:
            # zero padded timesteps so the positional conv ignores them
            x[padding_mask] = 0

        x_conv = self.pos_conv(x.transpose(1, 2))
        x_conv = x_conv.transpose(1, 2)
        x = x + x_conv

        if not self.layer_norm_first:
            x = self.layer_norm(x)

        x = F.dropout(x, p=self.dropout, training=self.training)

        # B x T x C -> T x B x C
        x = x.transpose(0, 1)

        layer_results = []
        z = None
        if tgt_layer is not None:
            layer_results.append((x, z))
        r = None
        # pos_bias is produced by layer 0 and reused by the later layers
        pos_bias = None
        for i, layer in enumerate(self.layers):
            # LayerDrop: randomly skip whole layers at train time
            dropout_probability = np.random.random()
            if not self.training or (dropout_probability > self.layerdrop):
                x, z, pos_bias = layer(x, self_attn_padding_mask=padding_mask, need_weights=False,
                                       self_attn_mask=streaming_mask, pos_bias=pos_bias)
            if tgt_layer is not None:
                layer_results.append((x, z))
            if i == tgt_layer:
                r = x
                break

        if r is not None:
            x = r

        return x, layer_results
class WavLMSentenceEncoderLayer(nn.Module):
    """
    Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
    models.

    Supports pre- and post-LayerNorm, a WavLM gated relative position bias
    (threaded through ``pos_bias``), an optionally fused MLP kernel, and
    optional low-rank language/attribute factorization of the feed-forward
    weights (the factor tensors are attached externally).
    """

    def __init__(
            self,
            embedding_dim: float = 768,
            ffn_embedding_dim: float = 3072,
            num_attention_heads: float = 8,
            dropout: float = 0.1,
            attention_dropout: float = 0.1,
            activation_dropout: float = 0.1,
            activation_fn: str = "relu",
            layer_norm_first: bool = False,
            has_relative_attention_bias: bool = False,
            num_buckets: int = 0,
            max_distance: int = 0,
            rescale_init: bool = False,
            gru_rel_pos: bool = False,
    ) -> None:
        super().__init__()
        # Initialize parameters
        self.embedding_dim = embedding_dim
        self.dropout = dropout
        self.activation_dropout = activation_dropout

        # Initialize blocks
        self.activation_name = activation_fn
        self.activation_fn = get_activation_fn(activation_fn)
        self.self_attn = WavLMMultiheadAttention(
            self.embedding_dim,
            num_attention_heads,
            dropout=attention_dropout,
            self_attention=True,
            has_relative_attention_bias=has_relative_attention_bias,
            num_buckets=num_buckets,
            max_distance=max_distance,
            rescale_init=rescale_init,
            gru_rel_pos=gru_rel_pos,
        )

        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(self.activation_dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.layer_norm_first = layer_norm_first

        # layer norm associated with the self attention layer
        self.self_attn_layer_norm = LayerNorm(self.embedding_dim)

        if self.activation_name == "glu":
            self.fc1 = GLU_Linear(self.embedding_dim, ffn_embedding_dim, "swish")
        else:
            self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
        self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)

        # layer norm associated with the position wise feed-forward NN
        self.final_layer_norm = LayerNorm(self.embedding_dim)

        # use the fused CUDA MLP kernels when available (relu / gelu only)
        self.fused = False
        self.fused_function = None
        if self.activation_name == 'relu':
            from onmt.modules.mlp.mlp import mlp_relu_function
            if mlp_relu_function is not None:
                self.fused_function = mlp_relu_function
                self.fused = True
        elif self.activation_name == 'gelu':
            from onmt.modules.mlp.mlp import mlp_gelu_function
            if mlp_gelu_function is not None:
                self.fused_function = mlp_gelu_function
                self.fused = True

    def get_mlp_weights(self, lang=None, atb=None):
        """Return (in_weight, out_weight, in_bias, out_bias) for the MLP.

        When ``lang`` is given and the layer is factorized (the
        ``is_factorized`` / ``rm_*`` / ``r_*`` attributes are attached
        externally — TODO confirm against the factorization setup code),
        the base fc1/fc2 weights are adapted with multiplicative and/or
        additive low-rank language factors, optionally refined by
        attribute (``atb``) sub-factors.
        """
        in_weight = self.fc1.weight
        out_weight = self.fc2.weight
        in_bias = self.fc1.bias
        out_bias = self.fc2.bias

        if lang is not None:
            if self.is_factorized:
                # First check if we use multiplicative
                if self.multiplicative_factorize:
                    rm_i = torch.index_select(self.rm_i, 0, lang).squeeze(0)  # squeeze possible because only 1
                    sm_i = torch.index_select(self.sm_i, 0, lang).squeeze(0)
                    rm_o = torch.index_select(self.rm_o, 0, lang).squeeze(0)
                    sm_o = torch.index_select(self.sm_o, 0, lang).squeeze(0)

                    if self.fast_factorize:
                        mul_factor_in = torch.mm(rm_i.t(), sm_i)
                        mul_factor_out = torch.mm(rm_o.t(), sm_o)
                    else:
                        mul_factor_in = torch.bmm(rm_i.unsqueeze(-1), sm_i.unsqueeze(1)).sum(dim=0)
                        mul_factor_out = torch.bmm(rm_o.unsqueeze(-1), sm_o.unsqueeze(1)).sum(dim=0)

                    # TODO: allow for multiple sub factorizers
                    if self.sub_factorized and atb is not None:
                        rm_i = torch.index_select(self.sub_rm_i, 0, atb).squeeze(0)  # squeeze possible because only 1
                        sm_i = torch.index_select(self.sub_sm_i, 0, atb).squeeze(0)
                        rm_o = torch.index_select(self.sub_rm_o, 0, atb).squeeze(0)
                        sm_o = torch.index_select(self.sub_sm_o, 0, atb).squeeze(0)

                        if self.fast_factorize:
                            sub_mul_factor_in = torch.mm(rm_i.t(), sm_i)
                            sub_mul_factor_out = torch.mm(rm_o.t(), sm_o)
                        else:
                            sub_mul_factor_in = torch.bmm(rm_i.unsqueeze(-1), sm_i.unsqueeze(1)).sum(dim=0)
                            sub_mul_factor_out = torch.bmm(rm_o.unsqueeze(-1), sm_o.unsqueeze(1)).sum(dim=0)

                        # has to be multiplicative here
                        mul_factor_in.mul_(sub_mul_factor_in)
                        mul_factor_out.mul_(sub_mul_factor_out)

                    in_weight = in_weight * mul_factor_in
                    out_weight = out_weight * mul_factor_out

                # For additive
                r_i = torch.index_select(self.r_i, 0, lang).squeeze(0)
                s_i = torch.index_select(self.s_i, 0, lang).squeeze(0)
                r_o = torch.index_select(self.r_o, 0, lang).squeeze(0)
                s_o = torch.index_select(self.s_o, 0, lang).squeeze(0)

                if self.fast_factorize:
                    add_factor_in = torch.mm(r_i.t(), s_i)
                    add_factor_out = torch.mm(r_o.t(), s_o)
                else:
                    add_factor_in = torch.bmm(r_i.unsqueeze(-1), s_i.unsqueeze(1)).sum(dim=0)
                    add_factor_out = torch.bmm(r_o.unsqueeze(-1), s_o.unsqueeze(1)).sum(dim=0)

                if self.sub_factorized and atb is not None:
                    r_i = torch.index_select(self.sub_r_i, 0, atb).squeeze(0)
                    s_i = torch.index_select(self.sub_s_i, 0, atb).squeeze(0)
                    r_o = torch.index_select(self.sub_r_o, 0, atb).squeeze(0)
                    s_o = torch.index_select(self.sub_s_o, 0, atb).squeeze(0)

                    if self.fast_factorize:
                        sub_add_factor_in = torch.mm(r_i.t(), s_i)
                        sub_add_factor_out = torch.mm(r_o.t(), s_o)
                    else:
                        sub_add_factor_in = torch.bmm(r_i.unsqueeze(-1), s_i.unsqueeze(1)).sum(dim=0)
                        sub_add_factor_out = torch.bmm(r_o.unsqueeze(-1), s_o.unsqueeze(1)).sum(dim=0)

                    # has to be additive here.
                    # BUG FIX: the original used the out-of-place ``add`` and
                    # discarded its result, so the attribute sub-factors never
                    # took effect; use in-place ``add_`` (mirrors ``mul_`` in
                    # the multiplicative branch above).
                    add_factor_in.add_(sub_add_factor_in)
                    add_factor_out.add_(sub_add_factor_out)

                in_weight = in_weight + add_factor_in
                out_weight = out_weight + add_factor_out

        return in_weight, out_weight, in_bias, out_bias

    def forward(
            self,
            x: torch.Tensor,
            self_attn_mask: torch.Tensor = None,
            self_attn_padding_mask: torch.Tensor = None,
            need_weights: bool = False,
            pos_bias=None, lang=None, atb=None,
            **kwargs
    ):
        """
        LayerNorm is applied either before or after the self-attention/ffn
        modules similar to the original Transformer implementation.

        Returns (x, attn, pos_bias) so the relative position bias can be
        reused by the next layer.
        """

        def call_mlp(x, in_weight, out_weight, in_bias, out_bias, activation_fn, dropout_p, training_,
                     fused, fused_function, checkpointing):
            """Feed-forward sub-block: fused CUDA kernel when available,
            otherwise the standard linear/activation/dropout/linear path."""
            # TODO: check type x torch.half or torch.float32
            if fused and x.is_cuda:
                dropout_p_ = dropout_p if training_ else 0.0
                weights = [in_weight, out_weight]
                biases = [in_bias, out_bias]

                x = fused_function(dropout_p_, checkpointing, x, *weights, *biases)
            else:
                x = F.linear(x, in_weight, in_bias)
                x = activation_fn(x)
                x = F.dropout(x, dropout_p, training=training_)
                x = F.linear(x, out_weight, out_bias)

            return x

        residual = x

        if self.layer_norm_first:
            # pre-LN: normalize -> attend -> residual
            x = self.self_attn_layer_norm(x)
            x, attn, pos_bias = self.self_attn(
                query=x,
                key=x,
                value=x,
                key_padding_mask=self_attn_padding_mask,
                need_weights=False,
                attn_mask=self_attn_mask,
                position_bias=pos_bias
            )
            x = self.dropout1(x)
            x = residual + x

            residual = x
            x = self.final_layer_norm(x)
            if self.activation_name == "glu":
                x = self.fc1(x)
                x = self.dropout2(x)
                x = self.fc2(x)
            else:
                in_weight, out_weight, in_bias, out_bias = self.get_mlp_weights(lang=lang, atb=atb)
                x = call_mlp(x, in_weight, out_weight, in_bias, out_bias, self.activation_fn,
                             self.dropout2.p, self.training,
                             self.fused, self.fused_function, False)
            x = self.dropout3(x)
            x = residual + x
        else:
            # post-LN: attend -> residual -> normalize
            x, attn, pos_bias = self.self_attn(
                query=x,
                key=x,
                value=x,
                key_padding_mask=self_attn_padding_mask,
                need_weights=need_weights,
                attn_mask=self_attn_mask,
                position_bias=pos_bias
            )

            x = self.dropout1(x)
            x = residual + x
            x = self.self_attn_layer_norm(x)

            residual = x
            if self.activation_name == "glu":
                x = self.fc1(x)
                x = self.dropout2(x)
                x = self.fc2(x)
            else:
                in_weight, out_weight, in_bias, out_bias = self.get_mlp_weights(lang=lang, atb=atb)
                x = call_mlp(x, in_weight, out_weight, in_bias, out_bias, self.activation_fn,
                             self.dropout2.p, self.training,
                             self.fused, self.fused_function, False)
            x = self.dropout3(x)
            x = residual + x
            x = self.final_layer_norm(x)

        return x, attn, pos_bias
NMTGMinor | NMTGMinor-master/onmt/models/speech_recognizer/fairseq_wav2vec2/fairseq_modules.py | import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import Parameter
import math
from typing import Dict, Optional, Tuple
import torch
from torch.cuda.amp import custom_fwd, custom_bwd
from onmt.modules.optimized.self_attention_func import self_attn_func, self_attn_compact_func
from onmt.modules.optimized.relative_self_attention_func import relative_self_attn_func
from onmt.modules.optimized.linear import linear_function, factorize_linear
class Fp32GroupNorm(nn.GroupNorm):
    """GroupNorm that always normalizes in float32.

    The input is upcast, normalized, and cast back to the input dtype,
    which sidesteps fp16 numerical issues in mixed-precision training.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, input):
        weight = None if self.weight is None else self.weight.float()
        bias = None if self.bias is None else self.bias.float()
        normed = F.group_norm(input.float(), self.num_groups, weight, bias, self.eps)
        return normed.type_as(input)
# Optional apex fast path: define FusedLayerNorm only when apex is installed.
# has_fused_layernorm records the availability for the LayerNorm factory below.
try:
    from apex.normalization import FusedLayerNorm as _FusedLayerNorm

    has_fused_layernorm = True

    class FusedLayerNorm(_FusedLayerNorm):
        # Wraps apex's FusedLayerNorm so the CUDA kernel runs on the
        # tensor's own device; falls back to the parent for CPU tensors.
        @torch.jit.unused
        def forward(self, x):
            if not x.is_cuda:
                return super().forward(x)
            else:
                # ensure the fused kernel launches on x's device
                with torch.cuda.device(x.device):
                    return super().forward(x)

except ImportError:
    has_fused_layernorm = False
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
    """Factory for a LayerNorm module.

    The apex FusedLayerNorm fast path is intentionally disabled here; the
    stock PyTorch implementation is always returned. ``export`` is accepted
    for interface compatibility but has no effect.
    """
    return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
class Fp32LayerNorm(nn.LayerNorm):
    """LayerNorm that always normalizes in float32 and casts back.

    Useful under mixed precision, where fp16 layer-norm statistics can
    overflow or lose accuracy.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, input):
        weight = None if self.weight is None else self.weight.float()
        bias = None if self.bias is None else self.bias.float()
        normed = F.layer_norm(
            input.float(), self.normalized_shape, weight, bias, self.eps
        )
        return normed.type_as(input)
class GradMultiply(torch.autograd.Function):
    """Identity in the forward pass; scales the gradient by ``scale``."""

    @staticmethod
    def forward(ctx, x, scale):
        ctx.scale = scale
        # x.new(x) yields a same-dtype/device copy outside the autograd graph
        return x.new(x)

    @staticmethod
    def backward(ctx, grad):
        # no gradient flows to the scale argument
        return grad * ctx.scale, None
class GumbelVectorQuantizer(nn.Module):
    def __init__(
        self,
        dim,
        num_vars,
        temp,
        groups,
        combine_groups,
        vq_dim,
        time_first,
        activation=nn.GELU(),
        weight_proj_depth=1,
        weight_proj_factor=1,
    ):
        """Vector quantization using gumbel softmax

        Args:
            dim: input dimension (channels)
            num_vars: number of quantized vectors per group
            temp: temperature for training. this should be a tuple of 3 elements: (start, stop, decay factor)
            groups: number of groups for vector quantization
            combine_groups: whether to use the vectors for all groups
            vq_dim: dimensionality of the resulting quantized vector
            time_first: if true, expect input in BxTxC format, otherwise in BxCxT
            activation: what activation to use (should be a module). this is only used if weight_proj_depth is > 1
            weight_proj_depth: number of layers (with activation in between) to project input before computing logits
            weight_proj_factor: this is used only if weight_proj_depth is > 1. scales the inner dimensionality of
                projections by this factor
        """
        super().__init__()

        self.groups = groups
        self.combine_groups = combine_groups
        self.input_dim = dim
        self.num_vars = num_vars
        self.time_first = time_first

        assert (
            vq_dim % groups == 0
        ), f"dim {vq_dim} must be divisible by groups {groups} for concatenation"

        # each group quantizes into vectors of size vq_dim // groups
        var_dim = vq_dim // groups
        num_groups = groups if not combine_groups else 1

        # codebook: num_groups * num_vars candidate vectors of size var_dim
        self.vars = nn.Parameter(torch.FloatTensor(1, num_groups * num_vars, var_dim))
        nn.init.uniform_(self.vars)

        if weight_proj_depth > 1:
            # small MLP mapping features to per-group logits over the codebook
            def block(input_dim, output_dim):
                return nn.Sequential(nn.Linear(input_dim, output_dim), activation)

            inner_dim = self.input_dim * weight_proj_factor
            self.weight_proj = nn.Sequential(
                *[
                    block(self.input_dim if i == 0 else inner_dim, inner_dim)
                    for i in range(weight_proj_depth - 1)
                ],
                nn.Linear(inner_dim, groups * num_vars),
            )
        else:
            self.weight_proj = nn.Linear(self.input_dim, groups * num_vars)
            nn.init.normal_(self.weight_proj.weight, mean=0, std=1)
            nn.init.zeros_(self.weight_proj.bias)

        # temp may arrive as a string literal like "(2, 0.5, 0.999995)"
        if isinstance(temp, str):
            import ast
            temp = ast.literal_eval(temp)
        assert len(temp) == 3, f"{temp}, {len(temp)}"

        self.max_temp, self.min_temp, self.temp_decay = temp
        self.curr_temp = self.max_temp
        self.codebook_indices = None

    def set_num_updates(self, num_updates):
        # anneal the gumbel temperature: max_temp * decay^updates, floored at min_temp
        self.curr_temp = max(
            self.max_temp * self.temp_decay ** num_updates, self.min_temp
        )

    def get_codebook_indices(self):
        """Lazily build (and cache) the flat indices enumerating every
        combination of one codeword per group."""
        if self.codebook_indices is None:
            from itertools import product

            p = [range(self.num_vars)] * self.groups
            inds = list(product(*p))
            self.codebook_indices = torch.tensor(
                inds, dtype=torch.long, device=self.vars.device
            ).flatten()

            if not self.combine_groups:
                # offset each group's indices into its own slice of self.vars
                self.codebook_indices = self.codebook_indices.view(
                    self.num_vars ** self.groups, -1
                )
                for b in range(1, self.groups):
                    self.codebook_indices[:, b] += self.num_vars * b
                self.codebook_indices = self.codebook_indices.flatten()
        return self.codebook_indices

    def codebook(self):
        """Return all num_vars**groups concatenated codebook entries."""
        indices = self.get_codebook_indices()
        return (
            self.vars.squeeze(0)
            .index_select(0, indices)
            .view(self.num_vars ** self.groups, -1)
        )

    def sample_from_codebook(self, b, n):
        """Sample ``n`` full codebook entries for each of ``b`` batch rows."""
        indices = self.get_codebook_indices()
        indices = indices.view(-1, self.groups)
        cb_size = indices.size(0)
        assert (
            n < cb_size
        ), f"sample size {n} is greater than size of codebook {cb_size}"
        sample_idx = torch.randint(low=0, high=cb_size, size=(b * n,))
        indices = indices[sample_idx]

        z = self.vars.squeeze(0).index_select(0, indices.flatten()).view(b, n, -1)
        return z

    def to_codebook_index(self, indices):
        """Collapse per-group indices (last dim) into a single mixed-radix id."""
        res = indices.new_full(indices.shape[:-1], 0)
        for i in range(self.groups):
            exponent = self.groups - i - 1
            res += indices[..., i] * (self.num_vars ** exponent)
        return res

    def forward_idx(self, x):
        """Convenience wrapper: quantize and also return the target indices."""
        res = self.forward(x, produce_targets=True)
        return res["x"], res["targets"]

    def forward(self, x, produce_targets=False):
        """Quantize ``x``.

        Returns a dict with: "x" (quantized features, same layout as input),
        "num_vars", "temp", "code_perplexity", "prob_perplexity", and
        optionally "targets" (chosen per-group indices).
        """
        result = {"num_vars": self.num_vars * self.groups}

        # B x H x T -> B x T x H if not time first
        if not self.time_first:
            x = x.transpose(1, 2)

        bsz, tsz, fsz = x.shape
        x = x.reshape(-1, fsz)
        # from fsz -> group * num_vars
        x = self.weight_proj(x)
        x = x.view(bsz * tsz * self.groups, -1)

        # choose the (indices of) max var in num_vars
        _, k = x.max(-1)

        # hard_x has the original size of x
        # 1 for chosen value, 0 for non-chosen value
        hard_x = (
            x.new_zeros(*x.shape)
            .scatter_(-1, k.view(-1, 1), 1.0)
            .view(bsz * tsz, self.groups, -1)
        )
        # mean over the bsz * tsz dimension?
        hard_probs = torch.mean(hard_x.float(), dim=0)
        result["code_perplexity"] = torch.exp(
            -torch.sum(hard_probs * torch.log(hard_probs + 1e-7), dim=-1)
        ).sum()

        # code probabilities for each group
        avg_probs = torch.softmax(
            x.view(bsz * tsz, self.groups, -1).float(), dim=-1
        ).mean(dim=0)
        result["prob_perplexity"] = torch.exp(
            -torch.sum(avg_probs * torch.log(avg_probs + 1e-7), dim=-1)
        ).sum()

        result["temp"] = self.curr_temp

        # straight-through gumbel softmax at train time; hard argmax at eval
        if self.training:
            x = F.gumbel_softmax(x.float(), tau=self.curr_temp, hard=True).type_as(x)
        else:
            x = hard_x

        x = x.view(bsz * tsz, -1)

        vars = self.vars
        if self.combine_groups:
            # one shared codebook reused for every group
            vars = vars.repeat(1, self.groups, 1)

        if produce_targets:
            result["targets"] = (
                x.view(bsz * tsz * self.groups, -1)
                .argmax(dim=-1)
                .view(bsz, tsz, self.groups)
                .detach()
            )

        # x size: [bsz * tsz * self.groups, self.num_vars]
        # the last dimension is basically distribution over different vars (for each group)
        x = x.unsqueeze(-1) * vars
        # vars is "probably" latent variable embeddings
        x = x.view(bsz * tsz, self.groups, self.num_vars, -1)
        x = x.sum(-2)
        x = x.view(bsz, tsz, -1)

        if not self.time_first:
            x = x.transpose(1, 2)  # BTC -> BCT

        result["x"] = x

        return result
class SamePad(nn.Module):
    """Trim the trailing frames a padded 1-D conv adds beyond the input length.

    With symmetric padding, an even kernel produces one extra output frame;
    a causal conv produces kernel_size - 1 extra frames.
    """

    def __init__(self, kernel_size, causal=False):
        super().__init__()
        # causal convs overshoot by k - 1 frames; even kernels by exactly one
        self.remove = (kernel_size - 1) if causal else (1 - kernel_size % 2)

    def forward(self, x):
        if self.remove <= 0:
            return x
        return x[:, :, : -self.remove]
class TransposeLast(nn.Module):
    """Swap the last two dimensions, optionally indexing into the input first.

    When ``deconstruct_idx`` is set, the module first selects
    ``x[deconstruct_idx]`` (e.g. to unpack a tuple/list input).
    """

    def __init__(self, deconstruct_idx=None):
        super().__init__()
        self.deconstruct_idx = deconstruct_idx

    def forward(self, x):
        tensor = x if self.deconstruct_idx is None else x[self.deconstruct_idx]
        return tensor.transpose(-2, -1)
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
    def __init__(
            self,
            embed_dim,
            num_heads,
            kdim=None,
            vdim=None,
            dropout=0.0,
            weight_drop=0.0,
            bias=True,
            add_bias_kv=False,
            add_zero_attn=False,
            self_attention=False,
            encoder_decoder_attention=False,
            favor=False,
            generalized_attention=False,
            nb_features=256,
            **kwargs,
    ):
        """Multi-head attention with optional performer (favor) kernel,
        weight dropout, and later-attachable relative/rotary positions and
        per-language factorization.

        :param embed_dim: model dimension (must be divisible by num_heads)
        :param kdim/vdim: key/value input dims (default: embed_dim)
        :param dropout: attention-probability dropout
        :param weight_drop: dropout applied to projection weights (fast path)
        :param favor: enable performer attention with nb_features features
        """
        super().__init__()
        # positional / factorization state, toggled later via add_* methods
        self.rotary_position = False
        self.pos_proj_weight = None
        self.relative = False
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
        self.num_heads = num_heads
        self.dropout_p = dropout
        self.weight_drop = weight_drop
        self.head_dim = embed_dim // num_heads
        assert (
            self.head_dim * num_heads == self.embed_dim
        ), "embed_dim must be divisible by num_heads"
        self.scaling = self.head_dim ** -0.5
        self.self_attention = self_attention
        self.encoder_decoder_attention = encoder_decoder_attention
        assert not self.self_attention or self.qkv_same_dim, (
            "Self-attention requires query, key and " "value to be of the same size"
        )
        # separate Q/K/V projections; convert_fast_attention() later fuses
        # these into a single proj_weight for the optimized kernels
        self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        if add_bias_kv:
            self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
            self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None
        self.add_zero_attn = add_zero_attn
        self.reset_parameters()
        self.favor = favor
        if self.favor:
            # performer-style linear attention feature map
            from onmt.modules.performer import Performer
            self.performer = Performer(self.head_dim, nb_features, generalized_attention=generalized_attention)
        else:
            self.performer = None
        self.onnx_trace = False
        # flags flipped by convert_fast_attention() / add_factorized_weights()
        self.fast_attention = False
        self.is_factorized = False
        self.multiplicative_factorize = False
        self.fast_factorize = False
        # from onmt.modules.optimized.fast_mha import fast_bert_mha, fast_self_attn_func
        # self.fast_bert_mha = fast_bert_mha
        from onmt.modules.optimized.flash_mha import flash_bert_mha
        self.fast_bert_mha = flash_bert_mha
def fix_projection_matrices_(self):
if self.proj_updater:
self.proj_updater.feature_redraw_interval = None
    def add_factorized_weights(self, n_languages, rank=4, multiplicative=False, fast=False, dyrank=False, **kwargs):
        """Attach low-rank per-language adapter factors for the fused QKV and
        output projections.

        :param n_languages: number of languages (rows of each factor table)
        :param rank: rank of the low-rank factors
        :param multiplicative: also create multiplicative (rm_*/sm_*) factors
        :param fast: fast-factorize path; requires ``multiplicative`` and
            skips the additive (r_*/s_*) factors
        :param dyrank: zero-init the r_* side instead of normal init
        """
        embed_dim = self.embed_dim
        self.is_factorized = True
        self.multiplicative_factorize = multiplicative
        self.fast_factorize = fast
        self.dyrank = dyrank

        if self.fast_factorize:
            assert self.multiplicative_factorize is True

        if multiplicative:
            # slow path uses rank 1 for the multiplicative part
            _rank = rank if fast else 1
            # *_i factors cover the concatenated QKV weight (3 * embed_dim)
            self.rm_i = torch.nn.Parameter(torch.Tensor(n_languages, _rank, 3 * embed_dim))
            self.sm_i = torch.nn.Parameter(torch.Tensor(n_languages, _rank, embed_dim))
            self.rm_o = torch.nn.Parameter(torch.Tensor(n_languages, _rank, embed_dim))
            self.sm_o = torch.nn.Parameter(torch.Tensor(n_languages, _rank, embed_dim))
            if self.relative:
                self.rm_p = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
                self.sm_p = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))

            # start at 1 so the multiplicative factors are initially identity
            constant = 1
            nn.init.constant_(self.rm_i, constant)
            nn.init.constant_(self.sm_i, constant)
            nn.init.constant_(self.rm_o, constant)
            nn.init.constant_(self.sm_o, constant)

            if self.relative:
                nn.init.constant_(self.rm_p, constant)
                nn.init.constant_(self.sm_p, constant)

        if not fast:
            # additive low-rank factors (skipped entirely on the fast path)
            self.r_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, 3 * embed_dim))
            self.s_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
            self.r_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
            self.s_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
            if self.relative:
                self.r_p = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
                self.s_p = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))

            if self.dyrank:
                # dynamic-rank: zero one side so the adapter starts as a no-op
                nn.init.zeros_(self.r_i)
                nn.init.normal_(self.s_i, 0.0, 0.02)
                nn.init.zeros_(self.r_o)
                nn.init.normal_(self.s_o, 0.0, 0.02)
                if self.relative:
                    nn.init.zeros_(self.r_p)
                    nn.init.normal_(self.s_p, 0.0, 0.02)

            else:
                std = 0.01 if fast else 0.02
                nn.init.normal_(self.r_i, 0.0, std)
                nn.init.normal_(self.s_i, 0.0, std)
                nn.init.normal_(self.r_o, 0.0, std)
                nn.init.normal_(self.s_o, 0.0, std)
                if self.relative:
                    nn.init.normal_(self.r_p, 0.0, std)
                    nn.init.normal_(self.s_p, 0.0, std)
    def convert_fast_attention(self):
        """Fuse the separate Q/K/V projections into a single interleaved
        proj_weight / proj_bias pair as required by the fast attention
        kernels, then drop the original q/k/v Linear modules. Idempotent."""
        # print("Convert from vanilla to fast attention module ...")
        if self.fast_attention:
            return
        self.fast_attention = True
        assert self.qkv_same_dim, "Only works with QKV same dim."
        w_q = self.q_proj.weight.clone()
        w_k = self.k_proj.weight.clone()
        w_v = self.v_proj.weight.clone()
        weights = [w_q, w_k, w_v]
        weight_ = torch.cat(weights, dim=0).contiguous()

        b_q = self.q_proj.bias.clone()
        b_k = self.k_proj.bias.clone()
        b_v = self.v_proj.bias.clone()
        biases = [b_q, b_k, b_v]
        bias_ = torch.cat(biases, dim=0).contiguous()

        head_dim = self.head_dim
        heads = self.num_heads
        input_dim = self.embed_dim

        # when we concatenate the weights, the output has the size 3 * D (3 -> heads -> head_dim)
        # the fast attention module requires (heads -> 3 -> head_dim)
        weight_ = weight_.reshape(3 * head_dim * heads, input_dim).view(3, heads, head_dim, input_dim).transpose(0, 1). \
            reshape(-1, input_dim)

        bias_ = bias_.reshape(3 * head_dim * heads).view(3, heads, head_dim).transpose(0, 1).reshape(-1)

        # materialize into fresh tensors before wrapping as Parameters
        weight_t = torch.Tensor(3 * input_dim, input_dim)
        bias_t = torch.Tensor(3 * input_dim)
        weight_t.copy_(weight_)
        bias_t.copy_(bias_)
        self.proj_weight = Parameter(weight_t)
        self.proj_bias = Parameter(bias_t)

        # preserve the frozen/trainable status of the original projections
        self.proj_weight.requires_grad = self.q_proj.weight.requires_grad
        self.proj_bias.requires_grad = self.q_proj.bias.requires_grad
        del self.q_proj, self.k_proj, self.v_proj
def add_relative_attention(self):
self.relative = True
self.pos_proj_weight = Parameter(torch.Tensor(self.embed_dim, self.embed_dim))
self.pos_proj_bias = Parameter(torch.Tensor(self.embed_dim))
self.r_w_bias = Parameter(torch.Tensor(self.num_heads, self.head_dim))
self.r_r_bias = Parameter(torch.Tensor(self.num_heads, self.head_dim))
std_ = math.sqrt(2.0 / (self.embed_dim + self.embed_dim))
nn.init.normal_(self.pos_proj_weight, 0.0, std_)
# nn.init.uniform_(self.pos_proj_weight, -std_, std_)
nn.init.constant_(self.pos_proj_bias, 0.)
nn.init.normal_(self.r_w_bias, 0.0, 0.02)
nn.init.normal_(self.r_r_bias, 0.0, 0.02)
def add_rotary_attention(self):
self.rotary_position = True
assert not self.relative
def reset_parameters(self):
if self.qkv_same_dim:
# Empirically observed the convergence to be much better with
# the scaled initialization
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
positions: Optional[Tensor] = None,
attn_mask: Optional[Tensor] = None,
cu_seqlens=None, max_len=None,
lang=None, atb=None,
checkpointing=False, **kwargs
) -> Tuple[Tensor, Optional[Tensor]]:
"""
:param checkpointing:
:param positions:
:param query:
:param key:
:param value:
:param key_padding_mask:
:param attn_mask:
:param cu_seqlens:
:param max_len:
:param lang:
:param atb:
:param kwargs:
:return:
"""
is_tpu = query.device.type == "xla"
checkpointing = False # temporarily not checkpoint atm
if not self.favor:
if not self.fast_attention:
tgt_len, bsz, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}"
assert list(query.size()) == [tgt_len, bsz, embed_dim]
need_weight = False
assert key is not None and value is not None
assert self.relative == False
return F.multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
torch.empty([0]),
torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout_p,
self.out_proj.weight,
self.out_proj.bias,
self.training,
key_padding_mask,
need_weight,
attn_mask,
use_separate_proj_weight=True,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
)
else:
in_proj_weight = F.dropout(self.proj_weight, self.weight_drop, training=self.training)
out_proj_weight = F.dropout(self.out_proj.weight, self.weight_drop, training=self.training)
pos_proj_weight = F.dropout(self.pos_proj_weight, self.weight_drop, training=self.training) \
if self.pos_proj_weight is not None else None
if self.is_factorized and self.fast_factorize:
if self.relative:
print("fast factorization is not implemented for relative attention yet")
raise NotImplementedError
hidden_states = query
n_languages, _rank = self.rm_i.size(0), self.rm_i.size(1)
# TODO: mm instead of index select
if lang.ndim == 1:
rm_i = torch.index_select(self.rm_i, 0, lang).squeeze(0) # squeeze possible because only 1
sm_i = torch.index_select(self.sm_i, 0, lang).squeeze(0)
rm_o = torch.index_select(self.rm_o, 0, lang).squeeze(0)
sm_o = torch.index_select(self.sm_o, 0, lang).squeeze(0)
elif lang.ndim == 2: # for flash attention
rm_i = torch.mm(lang, self.rm_i.view(n_languages, _rank * self.rm_i.size(-1))).view(
lang.size(0), _rank,
self.rm_i.size(-1))
sm_i = torch.mm(lang, self.sm_i.view(n_languages, _rank * self.sm_i.size(-1))).view(
lang.size(0), _rank,
self.sm_i.size(-1))
rm_o = torch.mm(lang, self.rm_o.view(n_languages, _rank * self.rm_o.size(-1))).view(
lang.size(0), _rank,
self.rm_o.size(-1))
sm_o = torch.mm(lang, self.sm_o.view(n_languages, _rank * self.sm_o.size(-1))).view(
lang.size(0), _rank,
self.sm_o.size(-1))
elif lang.ndim == 3:
_len, _bsz = lang.size(0), lang.size(1)
_lang = lang.view(_len * _bsz, lang.size(-1))
rm_i = torch.mm(_lang, self.rm_i.view(n_languages, _rank * self.rm_i.size(-1))).view(
_len, _bsz, _rank, self.rm_i.size(-1))
sm_i = torch.mm(_lang, self.sm_i.view(n_languages, _rank * self.sm_i.size(-1))).view(
_len, _bsz, _rank, self.sm_i.size(-1))
rm_o = torch.mm(_lang, self.rm_o.view(n_languages, _rank * self.rm_o.size(-1))).view(
_len, _bsz, _rank, self.rm_o.size(-1))
sm_o = torch.mm(_lang, self.sm_o.view(n_languages, _rank * self.sm_o.size(-1))).view(
_len, _bsz, _rank, self.sm_o.size(-1))
if hidden_states.ndim == 3:
bsz, qlen = hidden_states.size(1), hidden_states.size(0)
low_precision = True # Use CUDA impl
input_lin_results = factorize_linear(hidden_states, in_proj_weight, self.proj_bias, rm_i, sm_i)
rotary = self.rotary_position
attn_output, coverage = self_attn_compact_func(False, is_training, self.num_heads, input_lin_results,
key_padding_mask, self.dropout_p,
rotary, positions,
False, None, # incremental and state
low_precision,
True, checkpointing) # low-precision and return coverage
# outputs, coverage = self_attn_func(False, is_training, self.num_heads, inputs,
# in_proj_weight, out_proj_weight,
# self.proj_bias, self.out_proj.bias,
# key_padding_mask, self.dropout_p,
# rotary, positions,
# False, None, # incremental and state
# low_precision,
# True, checkpointing) # low-precision and return coverage
attn_output = attn_output.view(qlen, bsz, -1).contiguous()
output = factorize_linear(attn_output, out_proj_weight, self.out_proj.bias, rm_o, sm_o)
return output, coverage
else:
# this doesn't need checkpointing because fmha is doing checkpointing
assert self.fast_bert_mha is not None
assert query.dtype == torch.half
assert cu_seqlens is not None
assert max_len is not None # and max_len <= 512
assert self.relative == False
total_bsz = query.size(0)
# qkv = F.linear(query, in_proj_weight, self.proj_bias) # B x H
qkv = factorize_linear(hidden_states, in_proj_weight, self.proj_bias, rm_i, sm_i)
# B x 3 x H x d
# transpose 1 2 is necessary here because the weights are designed to be heads x 3 x d
# (for the more simple version without transposing)
if not self.rotary_position:
qkv = qkv.view(total_bsz, self.num_heads, 3, self.head_dim).transpose(1, 2).contiguous()
else:
assert positions is not None
cos, sin = positions
queries, keys, values = qkv.view(total_bsz, self.num_heads, 3, self.head_dim)
queries, keys = apply_rotary_pos_emb(queries, keys, cos, sin)
qkv = torch.stack([queries, keys, values], dim=2).transpose(1, 2).contiguous()
dropout_p = self.dropout_p if self.training else 0.0
causal = False
softmax_scale = 1.0 / math.sqrt(64)
# False = return softmax
context = self.fast_bert_mha(qkv, cu_seqlens, max_len, dropout_p, softmax_scale, causal, False)
coverage = None
context = context.view(-1, self.num_heads * self.head_dim).contiguous()
output = factorize_linear(context, out_proj_weight, self.out_proj.bias, rm_o, sm_o)
return output, coverage
if self.is_factorized:
if self.multiplicative_factorize:
# squeeze possible because only 1
rm_i = torch.index_select(self.rm_i, 0, lang).squeeze(0)
sm_i = torch.index_select(self.sm_i, 0, lang).squeeze(0)
rm_o = torch.index_select(self.rm_o, 0, lang).squeeze(0)
sm_o = torch.index_select(self.sm_o, 0, lang).squeeze(0)
if self.relative:
rm_p = torch.index_select(self.rm_p, 0, lang).squeeze(0)
sm_p = torch.index_select(self.sm_p, 0, lang).squeeze(0)
if self.dyrank:
mul_factor_in = torch.mm(rm_i.t(), sm_i)
mul_factor_out = torch.mm(rm_o.t(), sm_o)
if self.relative:
pos_factor = torch.mm(rm_p.t(), sm_p)
else:
mul_factor_in = torch.bmm(rm_i.unsqueeze(-1), sm_i.unsqueeze(1)).sum(dim=0)
mul_factor_out = torch.bmm(rm_o.unsqueeze(-1), sm_o.unsqueeze(1)).sum(dim=0)
if self.relative:
pos_factor = torch.bmm(rm_p.unsqueeze(-1), sm_p.unsqueeze(1)).sum(dim=0)
in_proj_weight = in_proj_weight * mul_factor_in
out_proj_weight = out_proj_weight * mul_factor_out
if self.relative:
pos_proj_weight = pos_proj_weight * pos_factor
# TODO: dyrank select rank
r_i = torch.index_select(self.r_i, 0, lang).squeeze(0)
s_i = torch.index_select(self.s_i, 0, lang).squeeze(0)
r_o = torch.index_select(self.r_o, 0, lang).squeeze(0)
s_o = torch.index_select(self.s_o, 0, lang).squeeze(0)
if self.relative:
r_p = torch.index_select(self.r_p, 0, lang).squeeze(0)
s_p = torch.index_select(self.s_p, 0, lang).squeeze(0)
if self.dyrank:
add_factor_in = torch.mm(r_i.t(), s_i)
add_factor_out = torch.mm(r_o.t(), s_o)
if self.relative: pos_factor = torch.mm(r_p.t(), s_p)
else:
add_factor_in = torch.bmm(r_i.unsqueeze(-1), s_i.unsqueeze(1)).sum(dim=0)
add_factor_out = torch.bmm(r_o.unsqueeze(-1), s_o.unsqueeze(1)).sum(dim=0)
if self.relative: pos_factor = torch.bmm(r_p.unsqueeze(-1), s_p.unsqueeze(1)).sum(dim=0)
in_proj_weight = in_proj_weight + add_factor_in
out_proj_weight = out_proj_weight + add_factor_out
if self.relative:
pos_proj_weight = pos_proj_weight + pos_factor
# Forward Pass starts here
if query.ndim == 3:
# Call semi-fast attention from CUDA/
tgt_len, bsz, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}"
assert list(query.size()) == [tgt_len, bsz, embed_dim]
inputs = query
is_training = self.training
low_precision = True
if self.relative:
recompute = checkpointing
outputs, coverage = relative_self_attn_func(inputs, positions, False,
is_training, self.num_heads,
in_proj_weight, out_proj_weight, pos_proj_weight,
self.proj_bias, self.out_proj.bias, self.pos_proj_bias,
self.r_w_bias, self.r_r_bias,
key_padding_mask, self.dropout_p,
False, None, False, # incremental and state and double precision
False, True, recompute) # learnable_pos + return-coverage
else:
rotary = self.rotary_position
outputs, coverage = self_attn_func(False, is_training, self.num_heads, inputs,
in_proj_weight, out_proj_weight,
self.proj_bias, self.out_proj.bias,
key_padding_mask, self.dropout_p,
rotary, positions,
False, None, # incremental and state
low_precision,
True, checkpointing) # low-precision and return coverage
return outputs, coverage
# Fused attention using packed data (B T H) -> (BxT H) and removing padded positions
elif query.ndim == 2:
# this doesn't need checkpointing because fmha is doing checkpointing
assert self.fast_bert_mha is not None
assert query.dtype == torch.half
assert cu_seqlens is not None
assert max_len is not None # and max_len <= 512
assert self.relative == False
total_bsz = query.size(0)
qkv = F.linear(query, in_proj_weight, self.proj_bias) # B x H
# B x 3 x H x d
# transpose 1 2 is necessary here because the weights are designed to be heads x 3 x d
# (for the more simple version without transposing)
if not self.rotary_position:
qkv = qkv.view(total_bsz, self.num_heads, 3, self.head_dim).transpose(1, 2).contiguous()
else:
assert positions is not None
cos, sin = positions
queries, keys, values = qkv.view(total_bsz, self.num_heads, 3, self.head_dim)
queries, keys = apply_rotary_pos_emb(queries, keys, cos, sin)
qkv = torch.stack([queries, keys, values], dim=2).transpose(1, 2).contiguous()
dropout_p = self.dropout_p if self.training else 0.0
causal = False
softmax_scale = 1.0 / math.sqrt(64)
# False = return softmax
context = self.fast_bert_mha(qkv, cu_seqlens, max_len, dropout_p, softmax_scale, causal, False)
coverage = None
context = context.view(-1, self.num_heads * self.head_dim).contiguous()
outputs = F.linear(context, out_proj_weight, self.out_proj.bias)
return outputs, coverage
else:
# using performer attention
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
bsz, len_q, hidden = q.size(0), q.size(1), q.size(2)
h, d = self.num_heads, self.head_dim
len_k, len_v = k.size(1), v.size(1)
q = q.view(bsz, len_q, self.num_heads, self.head_dim).permute(0, 2, 1, 3).reshape(bsz * h, len_q, d)
k = k.view(bsz, len_q, self.num_heads, self.head_dim).permute(0, 2, 1, 3).reshape(bsz * h, len_k, d)
v = v.view(bsz, len_q, self.num_heads, self.head_dim).permute(0, 2, 1, 3) # .reshape(bsz * h, len_v, d)
# 1 for padded positions, 0 for non-padded positions
if key_padding_mask is not None:
key_padding_mask = key_padding_mask[:, None, :, None]
v.masked_fill_(key_padding_mask, 0)
v = v.reshape(bsz * h, len_v, d)
out, attn = self.performer(q, k, v)
# out = out.transpose(1, 2).view(bsz, out.size(-2), -1)
out = out.reshape(bsz, h, len_q, -1).permute(0, 2, 1, 3).reshape(bsz, len_v, -1)
out = self.out_proj(out)
return out, attn
def rotate_half(x):
    """Rotate the last dimension: split x into halves (a, b) and return (-b, a)."""
    half = x.shape[-1] // 2
    first_half = x[..., :half]
    second_half = x[..., half:]
    # Use an explicit non-negative dim: dim=-1 triggers a bug in torch < 1.8.0
    return torch.cat((-second_half, first_half), dim=first_half.ndim - 1)
def apply_rotary_pos_emb(q, k, cos, sin):
    """Apply rotary position embeddings (RoPE) to query and key tensors."""
    rotated_q = q * cos + rotate_half(q) * sin
    rotated_k = k * cos + rotate_half(k) * sin
    return rotated_q, rotated_k
def gelu_accurate(x):
    """Tanh approximation of GELU (Hendrycks & Gimpel)."""
    if not hasattr(gelu_accurate, "_a"):
        # cache sqrt(2/pi) as a function attribute on first call
        gelu_accurate._a = math.sqrt(2 / math.pi)
    scale = gelu_accurate._a
    inner = scale * (x + 0.044715 * torch.pow(x, 3))
    return 0.5 * x * (1 + torch.tanh(inner))
def gelu(x: torch.Tensor) -> torch.Tensor:
    """Exact (erf-based) GELU computed in float32, cast back to the input dtype."""
    result = torch.nn.functional.gelu(x.float())
    return result.type_as(x)
class IndexCopy(torch.autograd.Function):
    """
    Autograd op that scatters a packed (un-padded) tensor back into a padded layout.
    Similar in spirit to rnn pad_packed_sequence: the rows of the input are copied
    into a zero-initialized tensor of `total_batch_size` rows at `non_pad_indices`.
    """

    @staticmethod
    @custom_fwd
    def forward(ctx, input, non_pad_indices, total_batch_size):
        """
        :param ctx: autograd context
        :param input: 2D [bsz x ...] where bsz is the number of rows after unpadding
        :param non_pad_indices: indices of the non-padded positions
        :param total_batch_size: (int) number of rows before unpadding (>= bsz)
        :return: zero-filled tensor with the input rows copied to non_pad_indices
        """
        out_shape = list(input.size())
        out_shape[0] = total_batch_size
        padded = input.new_zeros(*out_shape)
        padded.index_copy_(0, non_pad_indices, input)
        ctx.save_for_backward(non_pad_indices)

        return padded

    @staticmethod
    @custom_bwd
    def backward(ctx, output_grads):
        """Gather gradients from the padded layout back to the packed rows."""
        (non_pad_indices,) = ctx.saved_tensors

        grad_input = output_grads.index_select(0, non_pad_indices)
        # no gradients flow to the index tensor or the size argument
        return grad_input, None, None


index_copy = IndexCopy.apply
#
# class ConvDownsampler(nn.Module):
# def __init__(
# self,
# conv_layers: List[Tuple[int, int, int]], # n_in, n_out, kernel size?
# dropout: float = 0.0,
# mode: str = "default",
# conv_bias: bool = False,
# ):
# super().__init__()
#
# assert mode in {"default", "layer_norm"}
#
# def block(
# n_in,
# n_out,
# k,
# stride,
# is_layer_norm=False,
# is_group_norm=False,
# conv_bias=False,
# ):
# def make_conv():
# conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias)
# nn.init.kaiming_normal_(conv.weight)
# return conv
#
# assert (
# is_layer_norm and is_group_norm
# ) == False, "layer norm and group norm are exclusive"
#
# if is_layer_norm:
# return nn.Sequential(
# make_conv(),
# nn.Dropout(p=dropout),
# nn.Sequential(
# TransposeLast(),
# LayerNorm(dim),
# TransposeLast(),
# ),
# nn.GELU(),
# )
# elif is_group_norm:
# return nn.Sequential(
# make_conv(),
# nn.Dropout(p=dropout),
# Fp32GroupNorm(dim, dim, affine=True),
# nn.GELU(),
# )
# else:
# return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU())
#
# in_d = 1
# self.conv_layers = nn.ModuleList()
# for i, cl in enumerate(conv_layers):
# assert len(cl) == 3, "invalid conv definition: " + str(cl)
# (dim, k, stride) = cl
#
# self.conv_layers.append(
# block(
# in_d,
# dim,
# k,
# stride,
# is_layer_norm=mode == "layer_norm",
# is_group_norm=mode == "default" and i == 0,
# conv_bias=conv_bias,
# )
# )
# in_d = dim
#
# def forward(self, x):
#
# # BxT -> BxCxT (only for waveforms with 1 channel)
# x = x.unsqueeze(1)
#
# for conv in self.conv_layers:
# x = conv(x)
#
# return x | 41,973 | 39.870497 | 128 | py |
NMTGMinor | NMTGMinor-master/onmt/models/bayes_by_backprop/relative_transformer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState
import onmt
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from onmt.modules.dropout import embedded_dropout
from onmt.models.transformer_layers import PrePostProcessing
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
import sys
from onmt.models.bayes_by_backprop.relative_transformer_layers import \
TransformerEncoderLayer, TransformerDecoderLayer
torch.set_printoptions(threshold=500000)
# Positional Embedding with discrete inputs
class SinusoidalPositionalEmbedding(nn.Module):
    """Sinusoidal embedding for (possibly negative) relative position indices."""

    def __init__(self, demb):
        super(SinusoidalPositionalEmbedding, self).__init__()

        self.demb = demb

        # inverse frequencies 1 / 10000^(2i/d); registered as a buffer so it
        # follows the module across .to()/.cuda() calls
        inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
        self.register_buffer('inv_freq', inv_freq)

    def forward(self, pos_seq, sin_first=True, bsz=None):
        """
        :param bsz: if given, expand the batch dimension to this size
        :param pos_seq: sequences of RELATIVE position indices (can be negative for future)
        :param sin_first: in Attention is all you need paper, sin is first then cosin
        """
        angles = torch.ger(pos_seq, self.inv_freq)

        if sin_first:
            parts = (angles.sin(), angles.cos())
        else:
            parts = (angles.cos(), angles.sin())
        pos_emb = torch.cat(parts, dim=-1)

        emb = pos_emb[:, None, :]
        return emb.repeat(1, bsz, 1) if bsz is not None else emb
class RelativeTransformerEncoder(TransformerEncoder):
    """Bayes-by-Backprop Transformer encoder with relative sinusoidal position
    encoding (Transformer-XL style). Supports 'text' input (token ids) and
    audio input (feature frames, optionally CNN-downsampled)."""

    def __init__(self, opt, dicts, positional_encoder, encoder_type='text', language_embeddings=None):
        self.death_rate = opt.death_rate
        self.layer_modules = list()
        self.asynchronous = opt.asynchronous
        self.max_memory_size = opt.max_memory_size
        self.extra_context_size = opt.extra_context_size
        self.experimental = opt.experimental
        self.unidirectional = opt.unidirectional
        self.reversible = opt.src_reversible
        self.n_heads = opt.n_heads
        self.fast_self_attn = opt.fast_self_attention

        # build_modules will be called from the inherited constructor
        super(RelativeTransformerEncoder, self).__init__(opt, dicts, positional_encoder, encoder_type,
                                                         language_embeddings)

        # relative sinusoidal position encoding (replaces the absolute one
        # allocated by the parent constructor)
        self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
        self.d_head = self.model_size // self.n_heads

    def build_modules(self):
        """Allocate the stack of Bayes-by-Backprop encoder layers with a
        linearly increasing stochastic-depth death rate."""
        assert self.opt.src_reversible == False

        e_length = expected_length(self.layers, self.death_rate)
        print("* Bayes-By-Backprop Relative Transformer Encoder with %.2f expected layers" % e_length)
        if self.unidirectional:
            print("* Running a unidirectional Encoder.")

        self.layer_modules = nn.ModuleList()

        for _l in range(self.layers):
            # linearly decay the death rate
            death_r = (_l + 1.0) / self.layers * self.death_rate
            block = TransformerEncoderLayer(self.opt, death_rate=death_r)
            self.layer_modules.append(block)

    def forward(self, input, input_pos=None, input_lang=None, **kwargs):
        """
        Inputs Shapes:
            input: batch_size x src_len (text) or batch_size x src_len x feat (audio)
        Outputs (in the returned dict):
            context: src_len x batch_size x d_model
            src_mask: batch-first padding mask for the decoder
        """

        """ Embedding: batch_size x src_len x d_model """
        if self.input_type == "text":
            bsz_first_input = input
            input = input.transpose(0, 1)

            # batch-first padding mask kept for the decoder's encdec attention
            dec_attn_mask = bsz_first_input.eq(onmt.constants.PAD).unsqueeze(1)

            mem_len = 0
            mask_src = input.eq(onmt.constants.PAD).unsqueeze(0)  # 1 x src_len x bsz for broadcasting
            mems = None

            emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)

            """ Adding language embeddings """
            if self.use_language_embedding:
                assert self.language_embedding is not None
                # There is no "unsqueeze" here because the input is T x B x H and lang_emb is B x H
                if self.language_embedding_type in ['sum', 'all_sum']:
                    lang_emb = self.language_embedding(input_lang)
                    emb = emb + lang_emb.unsqueeze(0)

        else:
            # audio input: channel 0 holds the padding indicator, the rest are features
            if not self.cnn_downsampling:
                mask_src = input.narrow(2, 0, 1).squeeze(2).transpose(0, 1).eq(onmt.constants.PAD).unsqueeze(0)
                dec_attn_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
                input = input.narrow(2, 1, input.size(2) - 1)
                emb = self.audio_trans(input.contiguous().view(-1, input.size(2))).view(input.size(0),
                                                                                       input.size(1), -1)
                emb = emb.type_as(input)
            else:
                long_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
                input = input.narrow(2, 1, input.size(2) - 1)

                # first resizing to fit the CNN format
                input = input.view(input.size(0), input.size(1), -1, self.channels)
                input = input.permute(0, 3, 1, 2)
                input = self.audio_trans(input)
                input = input.permute(0, 2, 1, 3).contiguous()
                input = input.view(input.size(0), input.size(1), -1)
                input = self.linear_trans(input)

                # subsample the mask by the CNN's 4x downsampling factor.
                # fix: Tensor.transpose requires explicit dims — the original
                # bare .transpose() raises TypeError at runtime; B x T -> T x B
                # to match the non-downsampling branch above.
                mask_src = long_mask[:, 0:input.size(1) * 4:4].transpose(0, 1).unsqueeze(0)
                dec_attn_mask = long_mask[:, 0:input.size(1) * 4:4].unsqueeze(1)
                # the size seems to be B x T ?
                emb = input

            emb = emb.transpose(0, 1)
            input = input.transpose(0, 1)
            mem_len = 0
            mems = None

        if self.unidirectional:
            qlen = input.size(0)
            klen = qlen + mem_len
            attn_mask_src = torch.triu(
                emb.new_ones(qlen, klen), diagonal=1 + mem_len).byte()[:, :, None]

            # with right padding, the causal mask already covers the padded positions
            mask_src = attn_mask_src

        if onmt.constants.torch_version >= 1.2:
            mask_src = mask_src.bool()

        """ Scale the emb by sqrt(d_model) """
        emb = emb * math.sqrt(self.model_size)

        """ positional encoding """
        qlen = input.size(0)
        klen = qlen + mem_len

        # Asynchronous positions: 2K+1 positions instead of K+1
        if self.unidirectional:
            pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
        else:
            pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)

        # pos_emb has size 2T+1 x 1 x H
        pos_emb = self.positional_encoder(pos, bsz=input.size(1) if self.fast_self_attn else None)

        # B x T x H -> T x B x H
        context = emb

        # Apply dropout to both context and pos_emb
        context = self.preprocess_layer(context)
        pos_emb = self.preprocess_layer(pos_emb)

        for i, layer in enumerate(self.layer_modules):
            # src_len x batch_size x d_model
            context = layer(context, pos_emb, mask_src)

        # final layer norm
        context = self.postprocess_layer(context)

        output_dict = defaultdict(lambda: None, {'context': context, 'src_mask': dec_attn_mask, 'src': input})

        return output_dict

    def log_prior(self):
        # sum of the log-prior terms accumulated by each Bayes-by-Backprop layer
        log_prior = 0
        for module in self.layer_modules:
            log_prior += module.log_prior()

        return log_prior

    def log_variational_posterior(self):
        # sum of the log q(w|theta) terms accumulated by each layer
        log_variational_posterior = 0
        for module in self.layer_modules:
            log_variational_posterior += module.log_variational_posterior()

        return log_variational_posterior
class RelativeTransformerDecoder(TransformerDecoder):
    """Transformer decoder with relative sinusoidal position encoding
    (Transformer-XL style) built from Bayes-by-Backprop decoder layers."""

    def __init__(self, opt, dicts, positional_encoder, language_embeddings=None, ignore_source=False):

        self.death_rate = opt.death_rate
        self.max_memory_size = opt.max_memory_size
        self.extra_context_size = opt.extra_context_size
        self.n_heads = opt.n_heads
        self.fast_self_attn = opt.fast_self_attention

        # build_modules will be called from the inherited constructor
        super(RelativeTransformerDecoder, self).__init__(opt, dicts,
                                                         positional_encoder,
                                                         language_embeddings,
                                                         ignore_source,
                                                         allocate_positions=False)
        # relative positions are generated on the fly by this sinusoidal module
        self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
        self.d_head = self.model_size // self.n_heads
        # Parameters for the position biases - deprecated. kept for backward compatibility
        self.r_w_bias = nn.Parameter(torch.Tensor(self.n_heads, self.d_head))
        self.r_r_bias = nn.Parameter(torch.Tensor(self.n_heads, self.d_head))

    def renew_buffer(self, new_len):
        # no-op: relative position embeddings are computed per forward call,
        # so there is no fixed-length buffer to resize
        return

    def build_modules(self):
        """Allocate the stack of Bayes-by-Backprop decoder layers with a
        linearly increasing stochastic-depth death rate."""

        e_length = expected_length(self.layers, self.death_rate)
        self.opt.ignore_source = self.ignore_source
        print("* Bayes-By-Backprop Relative Transformer Decoder with %.2f expected layers" % e_length)

        self.layer_modules = nn.ModuleList()

        for l in range(self.layers):
            # linearly decay the death rate
            death_r = (l + 1.0) / self.layers * self.death_rate

            block = TransformerDecoderLayer(self.opt, death_rate=death_r)

            self.layer_modules.append(block)

    def process_embedding(self, input, input_lang=None):
        # embeddings are processed inside forward(); identity here
        return input

    def log_prior(self):
        # sum of the log-prior terms accumulated by each layer
        log_prior = 0
        for module in self.layer_modules:
            log_prior += module.log_prior()

        return log_prior

    def log_variational_posterior(self):
        # sum of the log q(w|theta) terms accumulated by each layer
        log_variational_posterior = 0

        for module in self.layer_modules:
            log_variational_posterior += module.log_variational_posterior()

        return log_variational_posterior

    def forward(self, input, context, src, input_pos=None, input_lang=None, **kwargs):
        """
        Inputs Shapes:
            input: (Variable) batch_size x len_tgt (wanna tranpose)
            context: (Variable) batch_size x src_len x d_model
            mask_src (Tensor) batch_size x src_len
        Outputs Shapes:
            out: batch_size x len_tgt x d_model
            coverage: batch_size x len_tgt x src_len

        """

        """ Embedding: batch_size x len_tgt x d_model """
        input = input.transpose(0, 1)  # T x B
        emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
        emb = emb * math.sqrt(self.model_size)

        # no Transformer-XL style memory is used in this decoder
        mem_len = 0
        mems = None
        extra_context = None

        if self.use_language_embedding:
            lang_emb = self.language_embeddings(input_lang)  # B x H or 1 x H
            if self.language_embedding_type == 'sum':
                emb = emb + lang_emb
            elif self.language_embedding_type == 'concat':
                # replace the bos embedding with the language
                bos_emb = lang_emb.expand_as(emb[0])
                emb[0] = bos_emb

                lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
                concat_emb = torch.cat([emb, lang_emb], dim=-1)
                emb = torch.relu(self.projector(concat_emb))
            else:
                raise NotImplementedError

        if context is not None:
            if self.encoder_type == "audio":
                if not self.encoder_cnn_downsampling:
                    # channel 0 of the raw audio tensor carries the padding indicator
                    mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
                else:
                    # encoder downsampled by 4x; subsample the mask accordingly
                    long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
                    mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
            else:

                mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
        else:
            mask_src = None

        qlen = input.size(0)
        klen = qlen + mem_len
        # preparing self-attention mask. The input is either left or right aligned

        dec_attn_mask = torch.triu(
            emb.new_ones(qlen, klen), diagonal=1 + mem_len).byte()[:, :, None]
        # pad_mask = input.eq(onmt.constants.PAD).byte()  # L x B
        #
        # dec_attn_mask = dec_attn_mask + pad_mask.unsqueeze(0)
        # dec_attn_mask = dec_attn_mask.gt(0)
        dec_attn_mask = dec_attn_mask.bool()

        # relative positions run from klen-1 down to 0
        pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)

        pos_emb = self.positional_encoder(pos, bsz=input.size(1) if self.fast_self_attn else None)

        output = self.preprocess_layer(emb.contiguous())

        pos_emb = self.preprocess_layer(pos_emb)

        for i, layer in enumerate(self.layer_modules):
            output, coverage, _ = layer(output, context, pos_emb, dec_attn_mask, mask_src)

        # From Google T2T
        # if normalization is done in layer_preprocess, then it should also be done
        # on the output, since the output can grow very large, being the sum of
        # a whole stack of unnormalized layer outputs.
        output = self.postprocess_layer(output)

        output_dict = {'hidden': output, 'coverage': coverage, 'context': context}
        output_dict = defaultdict(lambda: None, output_dict)

        return output_dict

    def step(self, input, decoder_state, **kwargs):
        """
        Single decoding step (used at inference time, optionally with
        incremental attention buffers).

        Inputs Shapes:
            input: (Variable) batch_size x len_tgt (wanna tranpose)
            context: (Variable) batch_size x src_len x d_model
            mask_src (Tensor) batch_size x src_len
            buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
        Outputs Shapes:
            out: batch_size x len_tgt x d_model
            coverage: batch_size x len_tgt x src_len

        """
        context = decoder_state.context
        buffers = decoder_state.attention_buffers
        lang = decoder_state.tgt_lang
        mask_src = decoder_state.src_mask
        buffering = decoder_state.buffering

        if decoder_state.concat_input_seq:
            if decoder_state.input_seq is None:
                decoder_state.input_seq = input
            else:
                # concatenate the last input to the previous input sequence
                decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
            input = decoder_state.input_seq.transpose(0, 1)  # B x T

        src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None

        if buffering:
            # use the last value of input to continue decoding
            if input.size(1) > 1:
                input_ = input[:, -1].unsqueeze(1).transpose(0, 1)
            else:
                input_ = input.transpose(0, 1)
        else:
            input_ = input.transpose(0, 1)  # from B x T to T x B

        """ Embedding: batch_size x 1 x d_model """
        emb = self.word_lut(input_) * math.sqrt(self.model_size)
        input = input.transpose(0, 1)
        klen = input.size(0)

        if self.use_language_embedding:
            lang_emb = self.language_embeddings(lang)  # B x H

            if self.language_embedding_type in ['sum', 'all_sum']:
                emb = emb + lang_emb
            elif self.language_embedding_type == 'concat':
                if input.size(0) == 1:
                    emb[0] = lang_emb

                lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
                concat_emb = torch.cat([emb, lang_emb], dim=-1)
                emb = torch.relu(self.projector(concat_emb))
            else:
                raise NotImplementedError

        # prepare position encoding
        qlen = emb.size(0)
        mlen = klen - qlen

        pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)

        pos_emb = self.positional_encoder(pos)

        dec_attn_mask = torch.triu(
            emb.new_ones(qlen, klen), diagonal=1 + mlen).byte()[:, :, None]

        if onmt.constants.torch_version >= 1.2:
            dec_attn_mask = dec_attn_mask.bool()

        if context is not None:
            if self.encoder_type == "audio":
                if not self.encoder_cnn_downsampling:
                    mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
                else:
                    long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
                    mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
            else:
                mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
        else:
            mask_src = None

        output = emb.contiguous()

        for i, layer in enumerate(self.layer_modules):
            buffer = buffers[i] if i in buffers else None

            if buffering:
                output, coverage, buffer = layer(output, context, pos_emb, dec_attn_mask, mask_src,
                                                 incremental=True, incremental_cache=buffer)
                decoder_state.update_attention_buffer(buffer, i)
            else:
                output, coverage, _ = layer(output, context, pos_emb, dec_attn_mask, mask_src)

        # normalize and take the last time step
        output = self.postprocess_layer(output)
        output = output[-1].unsqueeze(0)

        output_dict = defaultdict(lambda: None)
        output_dict['hidden'] = output
        output_dict['coverage'] = coverage
        output_dict['context'] = context

        return output_dict
class BayesianTransformer(Transformer):
    """Transformer whose encoder and decoder expose Bayes-by-Backprop
    log-prior / log-variational-posterior terms for the ELBO loss."""

    def log_prior(self):
        # total log p(w) accumulated over all variational layers
        return self.encoder.log_prior() + self.decoder.log_prior()

    def log_variational_posterior(self):
        # total log q(w|theta) accumulated over all variational layers
        return self.encoder.log_variational_posterior() + self.decoder.log_variational_posterior() | 18,937 | 38.372141 | 114 | py |
NMTGMinor | NMTGMinor-master/onmt/models/bayes_by_backprop/relative_transformer_layers.py | import torch
import torch.nn as nn
import onmt
from onmt.models.transformer_layers import PrePostProcessing, MultiHeadAttention, Linear
from onmt.utils import flip
from onmt.modules.linear import XavierLinear as Linear
from onmt.modules.linear import XavierLinear
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.dropout import VariationalDropout
from onmt.modules.bayes_by_backprop.encdec_attention import EncdecMultiheadAttn
from onmt.modules.bayes_by_backprop.feed_forward import PositionWiseFeedForward
from onmt.modules.bayes_by_backprop.relative_self_attention import RelativeSelfMultiheadAttn
class TransformerEncoderLayer(nn.Module):
    """One Bayes-by-Backprop encoder layer: pre-norm relative self-attention
    followed by a pre-norm position-wise feed-forward block, with stochastic
    depth (skip the whole layer with probability death_rate at training time)."""

    # def __init__(self, h, d_model, p, d_ff, attn_p=0.1, variational=False, death_rate=0.0, **kwargs):
    def __init__(self, opt, death_rate=0.0, **kwargs):
        super().__init__()
        self.variational = opt.variational_dropout
        self.death_rate = death_rate
        self.fast_self_attention = opt.fast_self_attention

        # 'n' = layernorm only (pre-norm); 'da' = dropout then residual add
        self.preprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
        self.postprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
                                                  variational=self.variational)
        self.preprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
        self.postprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
                                                 variational=self.variational)
        d_head = opt.model_size // opt.n_heads
        self.multihead = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout)

        self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
                                                   variational=self.variational)

    def log_prior(self):
        # Collect the log-prior terms accumulated by the sub-modules during the
        # forward pass, then reset the accumulators so each term is counted once.
        log_prior = 0
        log_prior += self.multihead.log_prior
        self.multihead.log_prior = 0
        log_prior += self.feedforward.log_prior
        self.feedforward.log_prior = 0

        return log_prior

    def log_variational_posterior(self):
        # Same collect-and-reset pattern for the log q(w|theta) terms.
        log_variational_posterior = 0
        log_variational_posterior += self.multihead.log_variational_posterior
        self.multihead.log_variational_posterior = 0
        log_variational_posterior += self.feedforward.log_variational_posterior
        self.feedforward.log_variational_posterior = 0

        return log_variational_posterior

    def forward(self, input, pos_emb, attn_mask, incremental=False, incremental_cache=None, mems=None):
        # NOTE(review): the return arity differs — (input, cache) when
        # incremental, bare input otherwise; callers appear to rely on this.

        if incremental and incremental_cache is None:
            incremental_cache = dict()

        # stochastic depth: with probability death_rate the whole layer is
        # skipped during training (coin == False leaves input unchanged)
        coin = True
        if self.training and self.death_rate > 0:
            coin = (torch.rand(1)[0].item() >= self.death_rate)

        if coin:

            if mems is not None and mems.size(0) > 0:
                mems = self.preprocess_attn(mems)
            else:
                mems = None

            query = self.preprocess_attn(input)

            out, _ = self.multihead(query, pos_emb, attn_mask, None, mems=mems,
                                    incremental=incremental, incremental_cache=incremental_cache)

            # rescaling before residual
            if self.training and self.death_rate > 0:
                out = out / (1 - self.death_rate)

            input = self.postprocess_attn(out, input)

            """ Feed forward layer
                layernorm > ffn > dropout > residual
            """
            out = self.feedforward(self.preprocess_ffn(input))

            # rescaling before residual
            if self.training and self.death_rate > 0:
                out = out / (1 - self.death_rate)

            input = self.postprocess_ffn(out, input)

        if incremental:
            return input, incremental_cache

        return input
class TransformerDecoderLayer(nn.Module):
    """One Bayes-by-Backprop decoder layer: pre-norm relative self-attention,
    optional encoder-decoder attention (skipped when ignore_source), and a
    pre-norm feed-forward block, all under stochastic depth (death_rate)."""

    def __init__(self, opt, death_rate=0.0):
        super().__init__()

        self.ignore_source = opt.ignore_source
        self.variational = opt.variational_dropout
        self.death_rate = death_rate
        self.fast_self_attention = opt.fast_self_attention

        # 'n' = layernorm only (pre-norm); 'da' = dropout then residual add
        self.preprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
        self.postprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
                                                  variational=self.variational)

        if not self.ignore_source:
            self.preprocess_src_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
            self.postprocess_src_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
                                                          variational=self.variational)
            self.multihead_src = EncdecMultiheadAttn(opt.n_heads, opt.model_size, opt.attn_dropout)

        self.preprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
        self.postprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
                                                 variational=self.variational)

        self.multihead_tgt = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout)

        self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
                                                   variational=self.variational)

    def forward(self, input, context, pos_emb, mask_tgt, mask_src,
                incremental=False, incremental_cache=None, reuse_source=True, mems=None):

        """ Self attention layer
            layernorm > attn > dropout > residual
        """
        if incremental and incremental_cache is None:
            incremental_cache = dict()

        # stochastic depth: with probability death_rate the whole layer is
        # skipped during training (coin == False leaves input unchanged)
        coin = True
        if self.training and self.death_rate > 0:
            coin = (torch.rand(1)[0].item() >= self.death_rate)

        if coin:
            # input and context should be time first ?
            if mems is not None and mems.size(0) > 0:
                mems = self.preprocess_attn(mems)
            else:
                mems = None

            query = self.preprocess_attn(input)

            out, _ = self.multihead_tgt(query, pos_emb, None, mask_tgt, mems=mems,
                                        incremental=incremental, incremental_cache=incremental_cache)

            # rescaling before residual
            if self.training and self.death_rate > 0:
                out = out / (1 - self.death_rate)

            input = self.postprocess_attn(out, input)

            """ Context Attention layer
                layernorm > attn > dropout > residual
            """
            if not self.ignore_source:
                query = self.preprocess_src_attn(input)
                incremental_source = incremental and reuse_source
                out, coverage = self.multihead_src(query, context, context, mask_src,
                                                   incremental=incremental_source,
                                                   incremental_cache=incremental_cache)

                # rescaling before residual
                if self.training and self.death_rate > 0:
                    out = out / (1 - self.death_rate)

                input = self.postprocess_src_attn(out, input)
            else:
                coverage = None

            """ Feed forward layer
                layernorm > ffn > dropout > residual
            """
            out = self.feedforward(self.preprocess_ffn(input))

            # rescaling before residual
            if self.training and self.death_rate > 0:
                out = out / (1 - self.death_rate)

            input = self.postprocess_ffn(out, input)
        else:
            coverage = None

        return input, coverage, incremental_cache

    def log_prior(self):
        # Collect-and-reset the log-prior accumulators of the sub-modules.
        # NOTE(review): self.multihead_src only exists when ignore_source is
        # False — this would raise AttributeError otherwise; confirm callers.
        log_prior = 0
        log_prior += self.multihead_src.log_prior
        self.multihead_src.log_prior = 0
        log_prior += self.multihead_tgt.log_prior
        self.multihead_tgt.log_prior = 0
        log_prior += self.feedforward.log_prior
        self.feedforward.log_prior = 0

        return log_prior

    def log_variational_posterior(self):
        # Same collect-and-reset pattern for the log q(w|theta) terms.
        log_variational_posterior = 0
        log_variational_posterior += self.multihead_src.log_variational_posterior
        self.multihead_src.log_variational_posterior = 0
        log_variational_posterior += self.multihead_tgt.log_variational_posterior
        self.multihead_tgt.log_variational_posterior = 0
        log_variational_posterior += self.feedforward.log_variational_posterior
        self.feedforward.log_variational_posterior = 0

        return log_variational_posterior
| 8,703 | 40.447619 | 103 | py |
NMTGMinor | NMTGMinor-master/onmt/models/multilingual_translator/reversible_transformers.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PrePostProcessing
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.optimized.relative_self_attention import RelativeSelfMultiheadAttn
from onmt.modules.optimized.encdec_attention import EncdecMultiheadAttn
from onmt.modules.optimized.feed_forward import PositionWiseFeedForward
from onmt.modules.layer_norm import LayerNorm
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
from onmt.modules.dropout import variational_dropout
class RelativeSelfAttention(nn.Module):
    """Pre-norm relative self-attention sublayer: LayerNorm -> attention ->
    residual dropout (the residual add itself happens in the caller)."""

    def __init__(self, opt):
        super().__init__()
        self.layer_norm = LayerNorm((opt.model_size,), elementwise_affine=True)
        # fall back to the global dropout when residual_dropout is negative
        self.residual_dropout = opt.dropout if opt.residual_dropout < 0 else opt.residual_dropout
        self.attn = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, dropout=opt.attn_dropout,
                                              learnable_pos=opt.learnable_position_encoding,
                                              max_pos=opt.max_pos_length)
        self.variational = opt.variational_dropout

    def forward(self, input, pos, key_padding_mask=None, attn_mask=None, incremental=False,
                incremental_cache=None, cleaning=False):
        normed = self.layer_norm(input)
        attn_out, coverage = self.attn(normed, pos,
                                       key_padding_mask=key_padding_mask,
                                       attn_mask=attn_mask,
                                       incremental=incremental,
                                       incremental_cache=incremental_cache)

        if self.variational:
            dropped = variational_dropout(attn_out, p=self.residual_dropout,
                                          inplace=False, training=self.training)
        else:
            dropped = F.dropout(attn_out, p=self.residual_dropout,
                                training=self.training, inplace=False)

        if cleaning:
            # free intermediates eagerly (used by the reversible wrapper)
            del normed, attn_out
        return dropped, coverage
class FeedForward(nn.Module):
    """Pre-norm position-wise feed-forward sub-layer.

    LayerNorm -> PositionWiseFeedForward -> residual dropout (standard or
    variational). The residual addition is performed by the caller.
    """

    def __init__(self, opt):
        super().__init__()
        self.layer_norm = nn.LayerNorm((opt.model_size,), elementwise_affine=True)
        # per-component dropout rates default to the global rate when unset (< 0)
        self.residual_dropout = opt.dropout if opt.residual_dropout < 0 else opt.residual_dropout
        self.ffn_dropout = opt.dropout if opt.ffn_dropout < 0 else opt.ffn_dropout
        self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                   variational=opt.variational_dropout, glu=opt.ffn_glu,
                                                   activation=opt.ffn_activation)
        self.variational = opt.variational_dropout

    def forward(self, input, cleaning=False):
        """Return the dropped-out feed-forward transform of the normalized input."""
        normed = self.layer_norm(input)
        transformed = self.feedforward(normed)

        dropout_fn = variational_dropout if self.variational else F.dropout
        out = dropout_fn(transformed, p=self.residual_dropout, training=self.training, inplace=False)

        if cleaning:
            # free intermediates eagerly — used inside the memory-tight reversible layers
            del normed, transformed
        return out
class SourceAttention(nn.Module):
    """Pre-norm encoder-decoder (source) attention sub-layer.

    LayerNorm on the decoder states, cross-attention over the encoder context,
    then residual dropout (standard or variational). The residual addition is
    performed by the caller.
    """

    def __init__(self, opt):
        super().__init__()
        self.layer_norm = nn.LayerNorm((opt.model_size,), elementwise_affine=True)
        # fall back to the global dropout rate when residual_dropout is unset (< 0)
        self.residual_dropout = opt.dropout if opt.residual_dropout < 0 else opt.residual_dropout
        self.attn = EncdecMultiheadAttn(opt.n_heads, opt.model_size, attn_drop=opt.attn_dropout)
        self.dropout = opt.attn_dropout
        self.variational = opt.variational_dropout

    def forward(self, input, context, attn_mask=None, incremental=False, incremental_cache=None, cleaning=False):
        """Return (dropped-out cross-attention output, coverage)."""
        normed = self.layer_norm(input)
        # context serves as both keys and values
        attn_out, coverage = self.attn(normed, context, context, attn_mask, incremental, incremental_cache)

        dropout_fn = variational_dropout if self.variational else F.dropout
        out = dropout_fn(attn_out, p=self.residual_dropout, training=self.training, inplace=False)

        if cleaning:
            # free intermediates eagerly — used inside the memory-tight reversible layers
            del normed, attn_out
        return out, coverage
class ReversibleEncoderFunction(Function):
    """Custom autograd Function running a stack of reversible encoder layers.

    Intermediate layer activations are NOT stored: the forward pass runs under
    torch.no_grad(), and the backward pass reconstructs each layer's inputs
    from its outputs (RevNet-style), trading extra compute for memory.
    Only the last layer's two output streams are kept on ctx.
    """

    @staticmethod
    def forward(ctx, layers, hidden_states, pos, attn_mask):
        """Run all layers on the duplicated input stream; returns (x1 + x2) / 2."""
        # attn_output, hidden_states = hidden_states, hidden_states # torch.chunk(hidden_states, 2, dim=-1)
        # the single input is duplicated into the two reversible streams
        first_input, second_input = hidden_states, hidden_states

        # run without autograd tape: backward_pass recomputes what it needs
        with torch.no_grad():
            for layer in layers:
                # forward pass in the layer
                first_input, second_input = layer(
                    first_input, second_input, pos, attn_mask
                )

        # attach params to ctx for backward
        # why should we detach here? because Y1 Y2 were built within torch.no_grad()
        # so cutting the backward from these variables seems unnecessary
        # save_for_backward will release memory more efficiently
        ctx.save_for_backward(first_input.clone().detach(), second_input, pos)
        ctx.layers = layers
        ctx.attn_mask = attn_mask   # just in case attn_mask is None

        with torch.no_grad():
            # merge the two streams by averaging
            output = first_input + second_input
            output.div_(2)

        # The only memory footprint is the last layer outputs and the "output".
        return output

    @staticmethod
    def backward(ctx, grad_output):
        """Reconstruct activations layer by layer (in reverse) while backpropagating."""
        # d(output)/d(x1) = d(output)/d(x2) = 1/2, so both streams get half the gradient
        # NOTE(review): mul_ mutates grad_output in place — assumes the caller does
        # not reuse this gradient tensor afterwards.
        grad_output.mul_(0.5)
        first_grad_output, second_grad_output = grad_output, grad_output

        # retrieve params from ctx
        first_output, second_output, pos = ctx.saved_tensors
        layers = ctx.layers
        attn_mask = ctx.attn_mask

        for idx, layer in enumerate(layers[::-1]):
            # backprop: each call restores the layer's inputs and accumulates grads
            first_output, second_output, first_grad_output, second_grad_output = layer.backward_pass(
                first_output, second_output, first_grad_output, second_grad_output, pos, attn_mask
            )

        grad_hidden_states = first_grad_output + second_grad_output

        # the position encodings don't need gradients; layers/attn_mask are non-tensors
        return None, grad_hidden_states, None, None
def reversible_encoder(layers, hidden_states, pos, attn_mask):
    """Functional entry point for the reversible encoder stack (autograd-safe)."""
    return ReversibleEncoderFunction.apply(layers, hidden_states, pos, attn_mask)
class ReversibleTransformerEncoderLayer(nn.Module):
    """One reversible encoder layer with two residual streams (x1, x2).

    Forward:  y1 = x1 + F(x2)   (F = relative self-attention)
              y2 = x2 + G(y1)   (G = feed-forward)
    backward_pass inverts these equations to restore (x1, x2) from (y1, y2),
    replaying the saved RNG state so dropout is identical in the recomputation.
    """

    def __init__(self, opt, death_rate=0.0):
        super().__init__()
        self.self_attn = RelativeSelfAttention(opt)
        self.feedforward = FeedForward(opt)
        # death_rate/forward_coin are stored for stochastic depth but the
        # coin-flip code in forward() is currently commented out
        self.death_rate = death_rate
        self.forward_coin = True

    def _init_attention_seed(self, *args):
        """
        This function sets a new seed for the
        attention layer to make dropout deterministic
        for both forward calls: 1 normal forward
        call and 1 forward call in backward
        to recalculate activations.
        """
        self.attn_cpu_state = torch.get_rng_state()
        self.attn_gpu_devices, self.attn_gpu_states = get_device_states(*args)

    def _init_feedforward_seed(self, *args):
        """
        This function sets a new seed for the
        feed forward layer to make dropout deterministic
        for both forward calls: 1 normal forward
        call and 1 forward call in backward
        to recalculate activations.
        """
        self.ffn_cpu_state = torch.get_rng_state()
        self.ffn_gpu_devices, self.ffn_gpu_states = get_device_states(*args)

    def forward(self, x1, x2, pos, attn_mask=None):
        """
        :param pos: position embeddings
        :param x2: second residual stream
        :param x1: first residual stream
        :param attn_mask: key padding mask for self-attention
        :return: (y1, y2) — the updated residual streams
        """
        # every forward pass we sample a different seed
        # for dropout and save for forward fn in backward pass
        # to have correct dropout
        self._init_attention_seed(x2, pos, attn_mask)
        z1, coverage = self.self_attn(x2, pos, key_padding_mask=attn_mask, attn_mask=None, cleaning=True)
        # y1 = x1 + F(x2)
        y1 = z1 + x1

        self._init_feedforward_seed(y1)
        z2 = self.feedforward(y1, cleaning=True)
        # y2 = x2 + G(y1)
        y2 = z2 + x2

        del x1, x2, z1, z2
        """return Y1 and Y2"""
        return y1, y2

    def backward_pass(self, y1, y2, dy1, dy2, pos, attn_mask=None):
        """
        :param pos: position embeddings (same as in forward)
        :param y1: first output stream
        :param y2: second output stream
        :param dy1: gradient w.r.t. y1
        :param dy2: gradient w.r.t. y2
        :param attn_mask: key padding mask (same as in forward)
        :return: (x1, x2, dx1, dx2) — restored inputs and their gradients
        """
        """Implementation of the backward pass for reversible transformer encoder"""
        # --- invert G: recompute G(y1) with the saved RNG state, backprop dy2 ---
        with torch.enable_grad():
            y1.requires_grad = True
            with torch.random.fork_rng(devices=self.ffn_gpu_devices, enabled=True):
                torch.set_rng_state(self.ffn_cpu_state)
                set_device_states(self.ffn_gpu_devices, self.ffn_gpu_states)
                gy1 = self.feedforward(y1)
            gy1.backward(dy2)
        with torch.no_grad():
            # restore X2 = Y2 - G(Y1)
            x2 = y2 - gy1
            del gy1, y2
            # chain rule: dL/dy1 accumulates the FFN path's contribution
            dx1 = dy1 + y1.grad
            del dy1
            y1.grad = None
        # --- invert F: recompute F(x2) with the saved RNG state, backprop dx1 ---
        with torch.enable_grad():
            x2.requires_grad = True
            with torch.random.fork_rng(devices=self.attn_gpu_devices, enabled=True):
                torch.set_rng_state(self.attn_cpu_state)
                set_device_states(self.attn_gpu_devices, self.attn_gpu_states)
                fx2, _, = self.self_attn(x2, pos, key_padding_mask=attn_mask)
            fx2.backward(dx1)
        with torch.no_grad():
            # restore X1 = Y1 - F(X2)
            x1 = y1 - fx2
            del y1, fx2
            dx2 = dy2 + x2.grad
            x2.grad = None
            del dy2
        return x1, x2, dx1, dx2
##################################################
############## DECODER FUNCTION ##################
##################################################
class ReversibleDecoderFunction(Function):
    """Custom autograd Function running a stack of reversible decoder layers.

    Like ReversibleEncoderFunction, intermediate activations are reconstructed
    during backward instead of being stored. Additionally accumulates the
    gradient w.r.t. the (shared) encoder context across all layers.
    """

    @staticmethod
    def forward(ctx, layers, hidden_states, pos, context, tgt_mask, src_mask,
                incremental=False, incremental_cache=None):
        """Run all layers; returns ((x1 + x2) / 2, stacked per-layer src coverages)."""
        # NOTE(review): bsz/B/idx are computed but never used below
        bsz, seq_len = hidden_states.shape[0], hidden_states.shape[1]
        B = bsz * seq_len

        idx = 0

        # the single input is duplicated into the two reversible streams;
        # the layers themselves run under torch.no_grad() internally
        x1, x2 = hidden_states, hidden_states
        coverages = []

        for layer in layers:
            idx = idx + 1
            # forward pass in the layer
            x1, x2, coverage_src = layer(
                x1, x2, pos, context, tgt_mask, src_mask,
                incremental=incremental, incremental_cache=incremental_cache
            )
            coverages.append(coverage_src)

        # attach params to ctx for backward
        # why should we detach here? because Y1 Y2 were built within torch.no_grad()
        # so cutting the backward from these variables seems unnecessary
        # save_for_backward will release memory more efficiently
        # detach() seems to be required especially for context ...
        ctx.save_for_backward(x1.clone().detach(), x2, context, pos)
        ctx.layers = layers
        ctx.src_mask = src_mask
        ctx.tgt_mask = tgt_mask

        with torch.no_grad():
            output = x1 + x2

        # concatenate 2 revnet outputs:
        return output.mul_(0.5), torch.stack(coverages)

    @staticmethod
    def backward(ctx, grad_hidden_states, grad_coverage):
        """Reconstruct activations layer by layer (in reverse) while backpropagating.

        grad_coverage is ignored: the stacked coverages are returned for
        inspection only and carry no gradient path here.
        """
        # We need three arguments because the forward pass returned 3 arguments
        # grad_attn_output, grad_hidden_states = torch.chunk(grad_hidden_states, 2, dim=-1)
        # both streams receive half of the incoming gradient (output = (x1+x2)/2)
        grad_hidden_states.mul_(0.5)
        dx1, dx2 = grad_hidden_states, grad_hidden_states

        # retrieve params from ctx
        x1, x2, context, pos = ctx.saved_tensors
        layers = ctx.layers
        src_mask = ctx.src_mask
        tgt_mask = ctx.tgt_mask

        grad_context = None  # we need to sum up the gradients of the context manually

        for idx, layer in enumerate(layers[::-1]):
            """Note: Here for each layer we detach the context once because we need to consider it
            as a separate variable and then later accumulate the gradients"""
            x1, x2, dx1, dx2, grad_context_ = layer.backward_pass(
                x1, x2, dx1, dx2,
                pos, context.detach(), tgt_mask, src_mask
            )

            # accumulate the per-layer context gradient into a single tensor
            if grad_context is None:
                grad_context = grad_context_
            elif grad_context_ is not None:  # prevent ignoring layer making this None
                grad_context.add_(grad_context_)
            del grad_context_

        grad_input = dx1 + dx2

        # grad pos is also None
        return None, grad_input, None, grad_context, None, None, None, None
def reversible_decoder(layers, hidden_states, pos, context, tgt_mask, src_mask, incremental, incremental_cache):
    """Functional entry point for the reversible decoder stack (autograd-safe)."""
    return ReversibleDecoderFunction.apply(layers, hidden_states, pos, context,
                                           tgt_mask, src_mask, incremental, incremental_cache)
class ReversibleTransformerDecoderLayer(nn.Module):
    """One reversible decoder layer with two residual streams (x1, x2).

    Forward (four sub-functions, each with its own saved RNG state):
        z1 = x1 + F(x2)   F = relative self-attention
        z2 = x2 + G(z1)   G = first feed-forward
        y1 = z1 + H(z2)   H = source (encoder-decoder) attention
        y2 = z2 + K(y1)   K = second feed-forward
    backward_pass inverts these in reverse order, replaying each sub-function
    with its saved RNG state so dropout matches the original forward pass.
    """

    def __init__(self, opt, death_rate=0.0):
        super(ReversibleTransformerDecoderLayer, self).__init__()

        self.ignore_source = opt.ignore_source
        # the reversible scheme below always needs the source-attention branch
        assert not self.ignore_source
        self.variational = opt.variational_dropout
        self.death_rate = death_rate
        self.dropout = opt.dropout

        self.self_attention = RelativeSelfAttention(opt)
        self.feed_forward_first = FeedForward(opt)

        if not self.ignore_source:
            self.src_attention = SourceAttention(opt)

        self.feed_forward_second = FeedForward(opt)

    def _init_src_attention_seed(self, *args):
        """
        This function sets a new seed for the
        attention layer to make dropout deterministic
        for both forward calls: 1 normal forward
        call and 1 forward call in backward
        to recalculate activations.
        """
        self.src_attn_cpu_state = torch.get_rng_state()
        self.src_attn_gpu_devices, self.src_attn_gpu_states = get_device_states(*args)

    def _init_attention_seed(self, *args):
        """
        This function sets a new seed for the
        attention layer to make dropout deterministic
        for both forward calls: 1 normal forward
        call and 1 forward call in backward
        to recalculate activations.
        """
        # randomize seeds
        self.attn_cpu_state = torch.get_rng_state()
        self.attn_gpu_devices, self.attn_gpu_states = get_device_states(*args)

    def _init_feedforward1_seed(self, *args):
        """
        This function sets a new seed for the
        feed forward layer to make dropout deterministic
        for both forward calls: 1 normal forward
        call and 1 forward call in backward
        to recalculate activations.
        """
        # randomize seeds
        self.ffn1_cpu_state = torch.get_rng_state()
        self.ffn1_gpu_devices, self.ffn1_gpu_states = get_device_states(*args)

    def _init_feedforward2_seed(self, *args):
        """
        This function sets a new seed for the
        feed forward layer to make dropout deterministic
        for both forward calls: 1 normal forward
        call and 1 forward call in backward
        to recalculate activations.
        """
        # randomize seeds
        self.ffn2_cpu_state = torch.get_rng_state()
        self.ffn2_gpu_devices, self.ffn2_gpu_states = get_device_states(*args)

    def forward(self, x1, x2, pos, context, mask_tgt, mask_src,
                incremental=False, incremental_cache=None, reuse_source=True):
        """
        :param pos: position embeddings
        :param x1: X1 (first residual stream)
        :param x2: X2 (second residual stream)
        :param context: encoder output states
        :param mask_tgt: target-side (causal) attention mask
        :param mask_src: source-side padding mask
        :param incremental: whether to use incremental decoding caches
        :param incremental_cache:
        :param reuse_source:
        :return: (y1, y2, coverage_src)
        """
        # if self.training:
        #     coin = (torch.rand(1)[0].item() >= self.death_rate)
        #
        #     self.forward_coin = coin
        # run without tape: the enclosing ReversibleDecoderFunction recomputes
        # everything it needs in backward_pass
        with torch.no_grad():
            # prepare the state for the first function (att > src->att)
            self._init_attention_seed(x2, pos)
            f_x2, coverage, = self.self_attention(x2, pos,
                                                  key_padding_mask=None, attn_mask=mask_tgt,
                                                  incremental=incremental,
                                                  incremental_cache=incremental_cache,
                                                  cleaning=True)
            # z1 = x1 + F(x2)
            z1 = f_x2 + x1

            self._init_feedforward1_seed(z1)
            g_z1 = self.feed_forward_first(z1, cleaning=True)
            # z2 = x2 + G(z1)
            z2 = x2 + g_z1

            self._init_src_attention_seed(z2, context, mask_src)
            h_z2, coverage_src = self.src_attention(z2, context, mask_src,
                                                    incremental=incremental,
                                                    incremental_cache=incremental_cache)
            # y1 = z1 + H(z2)
            y1 = z1 + h_z2

            # prepare the state for the second function
            self._init_feedforward2_seed(y1)
            k_y1 = self.feed_forward_second(y1, cleaning=True)

            # if self.training and self.death_rate > 0:
            #     g_y1 = g_y1 / (1 - self.death_rate)
            # y2 = z2 + K(y1)
            y2 = z2 + k_y1

        """return Y1 and Y2"""
        return y1, y2, coverage_src

    def backward_pass(self, y1, y2, dy1, dy2, pos, context,
                      mask_tgt, mask_src,
                      incremental=False, incremental_cache=None, reuse_source=False):
        """
        :param pos: position embeddings (same as in forward)
        :param y1: first output stream
        :param y2: second output stream
        :param dy1: dL/dY1
        :param dy2: dL/dY2
        :param context: encoder output states (detached by the caller)
        :param mask_tgt:
        :param mask_src:
        :param incremental:
        :param incremental_cache:
        :param reuse_source:
        :return: (x1, x2, dx1, dx2, grad_context)
        """
        # if not self.forward_coin:  # this layer was skipped, just return
        #     return y1, y2, dy1, dy2, None
        # first block: recompute the ffn transition function K, invert y2 = z2 + K(y1)
        with torch.enable_grad():
            y1.requires_grad = True
            with torch.random.fork_rng(devices=self.ffn2_gpu_devices, enabled=True):
                torch.set_rng_state(self.ffn2_cpu_state)
                set_device_states(self.ffn2_gpu_devices, self.ffn2_gpu_states)
                k_y1 = self.feed_forward_second(y1)
            k_y1.backward(dy2)
        with torch.no_grad():
            # restore Z2 = Y2 - K(Y1)
            z2 = y2 - k_y1
            del k_y1, y2
            # Dz1 = DY1 + Y1.grad
            dz1 = dy1 + y1.grad
            del dy1
            y1.grad = None
        # second block: recompute H (source attention), invert y1 = z1 + H(z2)
        with torch.enable_grad():
            z2.requires_grad = True
            context.requires_grad = True
            with torch.random.fork_rng(devices=self.src_attn_gpu_devices, enabled=True):
                torch.set_rng_state(self.src_attn_cpu_state)
                set_device_states(self.src_attn_gpu_devices, self.src_attn_gpu_states)
                # if not self.ignore_source:
                h_z2, _ = self.src_attention(z2, context, mask_src,
                                             incremental=incremental,
                                             incremental_cache=incremental_cache)
            # torch.autograd.backward(h_z2, dz1)
            h_z2.backward(dz1)
        with torch.no_grad():
            # restore Z1 = Y1 - H(Z2)
            z1 = y1 - h_z2
            del y1, h_z2
            dz2 = dy2 + z2.grad
            z2.grad = None
            del dy2
            # this layer's contribution to dL/dContext (summed over layers by caller)
            grad_context = context.grad
            del context.grad
        # third block: recompute G (first ffn), invert z2 = x2 + G(z1)
        with torch.enable_grad():
            z1.requires_grad = True
            with torch.random.fork_rng(devices=self.ffn1_gpu_devices, enabled=True):
                torch.set_rng_state(self.ffn1_cpu_state)
                set_device_states(self.ffn1_gpu_devices, self.ffn1_gpu_states)
                g_z1 = self.feed_forward_first(z1)
            # torch.autograd.backward(g_z1, dz2)
            g_z1.backward(dz2)
        #
        with torch.no_grad():
            # restore X2 = Z2 - G(Z1)
            x2 = z2 - g_z1
            del z2, g_z1
            dx1 = dz1 + z1.grad
            z1.grad = None
            del dz1
        # fourth block: recompute F (self attention), invert z1 = x1 + F(x2)
        with torch.enable_grad():
            x2.requires_grad = True
            with torch.random.fork_rng(devices=self.attn_gpu_devices, enabled=True):
                torch.set_rng_state(self.attn_cpu_state)
                set_device_states(self.attn_gpu_devices, self.attn_gpu_states)
                f_x2, _, = self.self_attention(x2, pos,
                                               key_padding_mask=None, attn_mask=mask_tgt,
                                               incremental=incremental,
                                               incremental_cache=incremental_cache)
            f_x2.backward(dx1)
        with torch.no_grad():
            # restore X1 = Z1 - F(X2)
            x1 = z1 - f_x2
            del z1, f_x2
            dx2 = dz2 + x2.grad
            x2.grad = None
            del dz2
        return x1, x2, dx1, dx2, grad_context
| 21,390 | 34.89094 | 113 | py |
NMTGMinor | NMTGMinor-master/onmt/models/multilingual_translator/relative_transformer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState
import onmt
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from onmt.modules.dropout import embedded_dropout
from onmt.modules.sinusoidal_positional_encoding import SinusoidalPositionalEmbedding, FastSinusoidalPositionalEncoding
from onmt.models.transformer_layers import PrePostProcessing
from .relative_transformer_layers import RelativeTransformerEncoderLayer, RelativeTransformerDecoderLayer
from .reversible_transformers import ReversibleTransformerEncoderLayer, reversible_encoder
from .reversible_transformers import ReversibleTransformerDecoderLayer, reversible_decoder
from onmt.modules.identity import Identity
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
import sys
from torch.utils.checkpoint import checkpoint
torch.set_printoptions(threshold=500000)
def create_forward_function(module):
    """Wrap *module* in a plain function.

    torch.utils.checkpoint expects a function rather than an nn.Module, so this
    returns a closure that simply forwards its positional arguments to *module*.
    """
    def forward_pass(*call_args):
        # delegate straight through to the wrapped module
        return module(*call_args)
    return forward_pass
class RelativeTransformerEncoder(TransformerEncoder):
    """Transformer encoder using relative position encodings.

    Supports sinusoidal, learnable, or rotary position encodings and an
    optional reversible layer stack (opt.src_reversible) for reduced memory.
    """

    def __init__(self, opt, dicts, positional_encoder, encoder_type='text', language_embeddings=None):
        self.death_rate = opt.death_rate
        self.learnable_position_encoding = opt.learnable_position_encoding
        self.layer_modules = list()
        self.unidirectional = opt.unidirectional
        self.n_heads = opt.n_heads
        self.n_languages = opt.n_languages
        self.checkpointing = opt.checkpointing
        self.absolute_position_encoding = opt.absolute_position_encoding
        self.early_emb_scale = opt.encoder_early_emb_scale
        self.learnable_position_encoding = opt.learnable_position_encoding
        self.rotary_position_encoding = opt.rotary_position_encoding
        self.max_pos_length = opt.max_pos_length
        self.reversible = opt.src_reversible

        # build_modules will be called from the inherited constructor
        super(RelativeTransformerEncoder, self).__init__(opt, dicts, positional_encoder, encoder_type,
                                                         language_embeddings)

        if not self.early_emb_scale and (self.use_language_embedding or self.absolute_position_encoding):
            print("[INFO] Embedding will be scaled after being added with embedding and position encoding."
                  "\n[INFO] For multilingual models its advisable to use -encoder_early_emb_scale")

        # learnable position encoding
        if self.learnable_position_encoding:
            assert not self.rotary_position_encoding
            # learnable positions are plain distance indices; no encoder module needed
            self.positional_encoder = None
        elif self.rotary_position_encoding:
            from onmt.modules.rotary_postional_encodings import SinusoidalEmbeddings
            self.positional_encoder = SinusoidalEmbeddings(opt.model_size // opt.n_heads)
        else:
            self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)

        if opt.rezero or opt.post_norm:
            # rezero/post-norm already normalize inside the layers; skip the final LayerNorm
            self.postprocess_layer = Identity()

        self.d_head = self.model_size // self.n_heads

    def build_modules(self):
        """Instantiate the layer stack; death rate grows linearly with depth."""
        e_length = expected_length(self.layers, self.death_rate)
        if self.reversible:
            print("* Relative Reversible Encoder with %.2f expected layers" % e_length)
        else:
            print("* Relative Translation Encoder with %.2f expected layers" % e_length)

        self.layer_modules = nn.ModuleList()

        for _l in range(self.layers):
            # linearly decay the death rate
            death_r = (_l + 1.0) / self.layers * self.death_rate

            if self.reversible:
                block = ReversibleTransformerEncoderLayer(self.opt, death_rate=death_r)
            else:
                block = RelativeTransformerEncoderLayer(self.opt, death_rate=death_r)

            self.layer_modules.append(block)

    def forward(self, input, input_pos=None, input_lang=None, streaming=False, **kwargs):
        """
        Inputs Shapes:
            input: batch_size x src_len (wanna tranpose)
        Outputs Shapes:
            out: batch_size x src_len x d_model
            mask_src
        """

        """ Embedding: batch_size x src_len x d_model """
        bsz_first_input = input
        input = input.transpose(0, 1)

        # batch-first padding mask, kept for the decoder's source attention
        dec_attn_mask = bsz_first_input.eq(onmt.constants.PAD).unsqueeze(1)

        mem_len = 0
        mems = None

        emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)

        if self.early_emb_scale:
            """ Scale the emb by sqrt(d_model) """
            emb = emb * math.sqrt(self.model_size)

        """ Adding language embeddings """
        if self.use_language_embedding:
            assert self.language_embedding is not None

            # There is no "unsqueeze" here because the input is T x B x H and lang_emb is B x H
            if self.language_embedding_type in ['sum', 'all_sum']:
                lang_emb = self.language_embedding(input_lang)
                emb = emb + lang_emb.unsqueeze(0)

        """ Adding positional encoding """
        qlen = input.size(0)
        klen = qlen + mem_len

        # Asynchronous positions: 2K+1 positions instead of K+1
        if not self.rotary_position_encoding:
            if not self.learnable_position_encoding:
                pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)
                # pos_emb has size 2T+1 x 1 x H
                pos_emb = self.positional_encoder(pos, bsz=input.size(1))
                pos_emb = self.preprocess_layer(pos_emb)
            else:
                # build a klen x klen matrix of clamped, shifted relative distances
                # (indices into the learnable position table)
                range_vec = torch.arange(klen, device=emb.device)
                range_mat = range_vec.unsqueeze(-1).expand(-1, klen).transpose(0, 1)
                distance_mat = range_vec - range_mat.transpose(0, 1)
                distance_mat.clamp_(-self.max_pos_length, self.max_pos_length).add_(self.max_pos_length)
                pos_emb = distance_mat

            mask_src = input.eq(onmt.constants.PAD).unsqueeze(0)  # 1 x src_len x batch_size for broadcasting
        elif self.rotary_position_encoding:
            # generate rotary position encodings as sinusoidal
            pos_emb = self.positional_encoder(input, seq_dim=0)
            mask_src = input.eq(onmt.constants.PAD).transpose(0, 1)  # bsz first

        if onmt.constants.torch_version >= 1.2:
            mask_src = mask_src.bool()

        if not self.early_emb_scale:
            """ Scale the emb by sqrt(d_model) """
            emb = emb * math.sqrt(self.model_size)

        # context size is now T x B x H
        context = self.preprocess_layer(emb)

        if self.reversible:
            context = reversible_encoder(self.layer_modules, context, pos_emb, mask_src)
        else:
            for i, layer in enumerate(self.layer_modules):
                # src_len x batch_size x d_model
                context = layer(context, pos_emb, mask_src, src_lang=input_lang)
                # if self.checkpointing == 0 or self.training is False:
                #     context = layer(context, pos_emb, mask_src, src_lang=input_lang)
                # else:
                #     context = checkpoint(create_forward_function(layer), context, pos_emb, mask_src, input_lang)

        # final layer norm. we can consider this layer norm as a part of the output layer/function
        context = self.postprocess_layer(context)

        output_dict = defaultdict(lambda: None, {'context': context, 'src_mask': dec_attn_mask, 'src': input, 'pos_emb': pos_emb})

        return output_dict
class RelativeTransformerDecoder(TransformerDecoder):
    """Transformer decoder using relative position encodings.

    Supports sinusoidal, learnable, or rotary position encodings and an
    optional reversible layer stack (opt.tgt_reversible). `forward` is the
    training path; `step` is the single-step inference path with optional
    incremental buffering.
    """

    def __init__(self, opt, dicts, positional_encoder, language_embeddings=None, ignore_source=False):

        self.death_rate = opt.death_rate
        self.n_heads = opt.n_heads
        self.checkpointing = opt.checkpointing
        self.late_emb_scale = opt.decoder_late_emb_scale
        self.learnable_position_encoding = opt.learnable_position_encoding
        self.max_pos_length = opt.max_pos_length
        self.reversible = opt.tgt_reversible
        self.rotary_position_encoding = opt.rotary_position_encoding

        # build_modules will be called from the inherited constructor
        super(RelativeTransformerDecoder, self).__init__(opt, dicts,
                                                         positional_encoder,
                                                         language_embeddings,
                                                         ignore_source,
                                                         allocate_positions=False)
        if self.learnable_position_encoding:
            assert self.rotary_position_encoding is False
            # learnable positions are plain distance indices; no encoder module needed
            self.positional_encoder = None
        elif self.rotary_position_encoding:
            from onmt.modules.rotary_postional_encodings import SinusoidalEmbeddings
            self.positional_encoder = SinusoidalEmbeddings(opt.model_size // opt.n_heads)
        else:
            self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
        self.d_head = self.model_size // self.n_heads

        if opt.rezero or opt.post_norm:
            # rezero/post-norm already normalize inside the layers; skip the final LayerNorm
            self.postprocess_layer = Identity()

    def renew_buffer(self, new_len):
        # relative positions are recomputed on the fly; nothing to resize
        return

    def build_modules(self):
        """Instantiate the layer stack; death rate grows linearly with depth."""

        e_length = expected_length(self.layers, self.death_rate)
        self.opt.ignore_source = self.ignore_source
        if self.reversible:
            print("* Transformer Reversible Decoder with Relative Attention with %.2f expected layers" % e_length)
        else:
            print("* Transformer Decoder with Relative Attention with %.2f expected layers" % e_length)

        self.layer_modules = nn.ModuleList()

        for l in range(self.layers):
            # linearly decay the death rate
            death_r = (l + 1.0) / self.layers * self.death_rate

            if not self.reversible:
                block = RelativeTransformerDecoderLayer(self.opt, death_rate=death_r)
            else:
                # NOTE(review): death_rate is not forwarded to the reversible layer —
                # stochastic depth is effectively disabled on this path; confirm intended
                block = ReversibleTransformerDecoderLayer(self.opt)

            self.layer_modules.append(block)

    def process_embedding(self, input, input_lang=None):
        # embeddings are processed inline in forward/step; no extra transformation here
        return input

    # TODO: merging forward_stream and forward
    # TODO: write a step function for encoder
    def forward(self, input, context, src, input_pos=None, src_lang=None, tgt_lang=None,
                streaming=False, **kwargs):
        """
        Inputs Shapes:
            input: (Variable) batch_size x len_tgt (wanna tranpose)
            context: (Variable) batch_size x src_len x d_model
            mask_src (Tensor) batch_size x src_len
        Outputs Shapes:
            out: batch_size x len_tgt x d_model
            coverage: batch_size x len_tgt x src_len

        """

        """ Embedding: batch_size x len_tgt x d_model """
        input = input.transpose(0, 1)  # T x B
        emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
        if not self.late_emb_scale:
            emb = emb * math.sqrt(self.model_size)

        mem_len = 0
        mems = None
        extra_context = None

        if self.use_language_embedding:
            lang_emb = self.language_embeddings(tgt_lang)  # B x H or 1 x H
            if self.language_embedding_type == 'sum':
                emb = emb + lang_emb
            elif self.language_embedding_type == 'concat':
                lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
                concat_emb = torch.cat([emb, lang_emb], dim=-1)
                emb = torch.relu(self.projector(concat_emb))
            else:
                raise NotImplementedError

        if context is not None:
            mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
        else:
            mask_src = None

        qlen = input.size(0)
        klen = qlen + mem_len

        # preparing self-attention mask. The input is left aligned so we do not need to add the pad mask
        dec_attn_mask = torch.triu(
            emb.new_ones(qlen, klen), diagonal=1 + mem_len).byte()

        dec_attn_mask = dec_attn_mask.bool()

        # relative positions
        if self.rotary_position_encoding:
            pos_emb = self.positional_encoder(input, seq_dim=0)
            pos_emb_src = self.positional_encoder(context, seq_dim=0)
        elif not self.learnable_position_encoding:
            # descending positions (klen-1 .. 0) for relative attention
            pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
            pos_emb = self.positional_encoder(pos, bsz=input.size(1))
            pos_emb = self.preprocess_layer(pos_emb)
            pos_emb_src = None
        else:
            # klen x klen matrix of clamped, shifted relative distances
            # (indices into the learnable position table)
            range_vec = torch.arange(klen, device=emb.device)
            range_mat = range_vec.unsqueeze(-1).expand(-1, klen).transpose(0, 1)
            distance_mat = range_vec - range_mat.transpose(0, 1)
            distance_mat.clamp_(-self.max_pos_length, self.max_pos_length).add_(self.max_pos_length)
            pos_emb = distance_mat
            pos_emb_src = None
            # pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype).long()
            # pos.clamp_(-self.max_pos_length, self.max_pos_length).add_(self.max_pos_length)
            # pos_emb = pos.unsqueeze(1)

        if self.late_emb_scale:
            emb = emb * math.sqrt(self.model_size)

        output = self.preprocess_layer(emb.contiguous())

        if self.reversible:
            # TODO: add src lang and tgt lang to reversible
            output, coverage = reversible_decoder(self.layer_modules, output, pos_emb, context,
                                                  dec_attn_mask.squeeze(-1), mask_src,
                                                  False, None)  # incremental variables
        else:
            for i, layer in enumerate(self.layer_modules):
                output, coverage = layer(output, context, pos_emb, dec_attn_mask, mask_src,
                                         src_lang=src_lang, tgt_lang=tgt_lang,
                                         pos_emb_src=pos_emb_src)
                # if self.checkpointing == 0 or self.training is False:
                #
                #     output, coverage = layer(output, context, pos_emb, dec_attn_mask, mask_src,
                #                              src_lang=src_lang, tgt_lang=tgt_lang)
                #
                # else:
                #     output, coverage = checkpoint(create_forward_function(layer), output, context, pos_emb,
                #                                   dec_attn_mask,
                #                                   mask_src, src_lang, tgt_lang)

        # From Google T2T
        # if normalization is done in layer_preprocess, then it should also be done
        # on the output, since the output can grow very large, being the sum of
        # a whole stack of unnormalized layer outputs.
        output = self.postprocess_layer(output)

        output_dict = {'hidden': output, 'coverage': coverage, 'context': context}
        output_dict = defaultdict(lambda: None, output_dict)

        return output_dict

    def step(self, input, decoder_state, streaming=False):
        """
        Inputs Shapes:
            input: (Variable) batch_size x len_tgt (wanna tranpose)
            context: (Variable) batch_size x src_len x d_model
            mask_src (Tensor) batch_size x src_len
            buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
        Outputs Shapes:
            out: batch_size x len_tgt x d_model
            coverage: batch_size x len_tgt x src_len

        """
        context = decoder_state.context
        buffers = decoder_state.attention_buffers
        lang = decoder_state.tgt_lang
        src_lang = decoder_state.src_lang
        buffering = decoder_state.buffering

        if self.rotary_position_encoding:
            # rotary encodings depend on absolute offsets; incremental buffering is disabled
            buffering = False

        if decoder_state.concat_input_seq:
            if decoder_state.input_seq is None:
                decoder_state.input_seq = input
            else:
                # concatenate the last input to the previous input sequence
                decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
            input = decoder_state.input_seq.transpose(0, 1)  # B x T

        src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None

        if buffering:
            # use the last value of input to continue decoding
            input_ = input[:, -1].unsqueeze(1).transpose(0, 1)
            # input_ = input.transpose(0, 1)
        else:
            input_ = input.transpose(0, 1)  # from B x T to T x B

        """ Embedding: batch_size x 1 x d_model """
        emb = self.word_lut(input_)

        if not self.late_emb_scale:
            emb = emb * math.sqrt(self.model_size)

        input = input.transpose(0, 1)
        klen = input.size(0)

        if self.use_language_embedding:
            lang_emb = self.language_embeddings(lang)  # B x H

            if self.language_embedding_type in ['sum', 'all_sum']:
                emb = emb + lang_emb
            elif self.language_embedding_type == 'concat':
                lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
                concat_emb = torch.cat([emb, lang_emb], dim=-1)
                emb = torch.relu(self.projector(concat_emb))
            else:
                raise NotImplementedError

        # prepare position encoding
        qlen = emb.size(0)
        mlen = klen - qlen

        # if not self.absolute_position_encoding:
        if self.rotary_position_encoding:
            pos_emb = self.positional_encoder(input, seq_dim=0)
            pos_emb_src = self.positional_encoder(context, seq_dim=0)
        else:
            pos_emb_src = None
            if self.learnable_position_encoding:
                if buffering:
                    # only the newest query row of the distance matrix is needed
                    distance_mat = torch.arange(-klen + 1, 1, 1, device=emb.device).unsqueeze(0)
                else:
                    range_vec = torch.arange(klen, device=emb.device)
                    range_mat = range_vec.unsqueeze(-1).expand(-1, klen).transpose(0, 1)
                    distance_mat = range_vec - range_mat.transpose(0, 1)
                distance_mat.clamp_(-self.max_pos_length, self.max_pos_length).add_(self.max_pos_length)
                pos_emb = distance_mat
                # pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
                # pos.clamp_(-self.max_pos_length, self.max_pos_length).add_(self.max_pos_length)
                # pos_emb = pos.unsqueeze(1)
            else:
                pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
                pos_emb = self.positional_encoder(pos)
        # else:
        #     if buffering:
        #         emb = self.positional_encoder(emb.transpose(0, 1), t=input.size(1)).transpose(0, 1)
        #     else:
        #         emb = self.positional_encoder(emb.transpose(0, 1)).transpose(0, 1)

        dec_attn_mask = torch.triu(
            emb.new_ones(qlen, klen), diagonal=1 + mlen).byte()

        dec_attn_mask = dec_attn_mask.bool()

        if context is not None:
            if self.encoder_type == "audio":
                if not self.encoder_cnn_downsampling:
                    mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
                else:
                    # CNN front-end downsamples by 4; subsample the padding mask accordingly
                    long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
                    mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
            else:
                mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
        else:
            mask_src = None

        if self.late_emb_scale:
            emb = emb * math.sqrt(self.model_size)

        output = emb.contiguous()

        if self.reversible:
            incremental = True
            incremental_cache = buffer
            # NOTE(review): this branch looks broken/untested — `buffer` is not
            # defined at this point (it is only bound inside the non-reversible
            # loop below), and `reversible_decoder` is a plain function, not an
            # autograd.Function, so `.apply` will raise. Verify before enabling
            # -tgt_reversible at inference time.

            output, coverage = reversible_decoder.apply(self.layer_modules, output, pos_emb, context,
                                                        dec_attn_mask, mask_src,
                                                        incremental, incremental_cache)
        else:
            for i, layer in enumerate(self.layer_modules):
                buffer = buffers[i] if i in buffers else None

                if buffering:
                    output, coverage, buffer = layer(output, context, pos_emb, dec_attn_mask, mask_src,
                                                     tgt_lang=lang, src_lang=src_lang, pos_emb_src=pos_emb_src,
                                                     incremental=True, incremental_cache=buffer)
                    decoder_state.update_attention_buffer(buffer, i)
                else:
                    output, coverage = layer(output, context, pos_emb, dec_attn_mask, mask_src,
                                             src_lang=src_lang, tgt_lang=lang, pos_emb_src=pos_emb_src)

        # normalize and take the last time step
        output = self.postprocess_layer(output)
        output = output[-1].unsqueeze(0)

        output_dict = defaultdict(lambda: None)
        output_dict['hidden'] = output
        output_dict['coverage'] = coverage
        output_dict['context'] = context

        return output_dict
| 21,668 | 43.58642 | 130 | py |
NMTGMinor | NMTGMinor-master/onmt/models/multilingual_translator/relative_transformer_layers.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import onmt
from onmt.models.transformer_layers import PrePostProcessing
from onmt.modules.optimized.encdec_attention import EncdecMultiheadAttn
from onmt.modules.optimized.relative_self_attention import RelativeSelfMultiheadAttn
from onmt.modules.optimized.self_attention import SelfMultiheadAttn
from onmt.modules.optimized.feed_forward import PositionWiseFeedForward
from onmt.modules.multilingual_factorized.linear import MFWPositionWiseFeedForward
from onmt.modules.multilingual_factorized.encdec_attention import MFWEncdecMultiheadAttn
from onmt.modules.multilingual_factorized.relative_attention import MFWRelativeSelfMultiheadAttn
from onmt.modules.dropout import variational_dropout
from onmt.modules.identity import Identity
from onmt.modules.optimized.dropout_add import fused_dropout_add
def preprocessing(rezero, model_size, post_norm=False):
    """Build the sub-layer pre-processing module.

    Pre-norm architectures apply layer-norm ('n') before each sub-layer;
    with rezero or post-norm the pre-processing step is a no-op (empty
    sequence).
    """
    use_prenorm = not (rezero or post_norm)
    return PrePostProcessing(model_size, 0.0, sequence='n' if use_prenorm else '')
def postprocessing(rezero, model_size, dropout, variational=False, post_norm=False,
                   dropout_residual=True):
    """Build the sub-layer post-processing module.

    The processing sequence is assembled from: 'd' (dropout, when the
    residual dropout is applied here rather than inside the sub-layer),
    'z' (rezero residual) or 'a' (plain residual add), and 'n'
    (layer-norm, only for post-norm architectures).
    """
    steps = []
    if dropout_residual:
        steps.append('d')
    steps.append('z' if rezero else 'a')
    if post_norm:
        steps.append('n')
    return PrePostProcessing(model_size, dropout,
                             sequence=''.join(steps),
                             variational=variational)
class RelativeTransformerEncoderLayer(nn.Module):
    """A Transformer encoder layer with relative (or rotary) self-attention.

    Sub-layer order (pre-norm residual blocks by default):
        [macaron FFN, scaled by 0.5] -> self-attention -> FFN

    Stochastic depth: during training the layer is skipped with probability
    ``death_rate``; when ``opt.stochastic_sublayer`` is set, the drop coin is
    re-tossed before every sub-layer instead of once per layer. Surviving
    sub-layer outputs are rescaled by 1/(1 - death_rate).
    """

    def __init__(self, opt, death_rate=0.0, **kwargs):
        # opt: namespace of model hyper-parameters.
        # death_rate: per-layer drop probability for stochastic depth.
        super(RelativeTransformerEncoderLayer, self).__init__()
        self.variational = opt.variational_dropout
        self.batch_ensemble = opt.batch_ensemble
        # self.multilingual_factorized_weights = opt.multilingual_factorized_weights
        self.death_rate = death_rate
        self.mfw = opt.multilingual_factorized_weights
        self.macaron = opt.macaron
        # macaron layers sandwich attention between two half-weighted FFNs
        self.ffn_scale = 0.5 if self.macaron else 1
        self.dropout = opt.dropout
        # negative residual/ffn dropout values mean "fall back to opt.dropout"
        self.residual_dropout = opt.residual_dropout if opt.residual_dropout >= 0 else opt.dropout
        self.ffn_dropout = opt.ffn_dropout if opt.ffn_dropout >= 0 else opt.dropout
        self.rezero = opt.rezero
        self.rotary_position_encoding = opt.rotary_position_encoding
        self.learnable_pos = opt.learnable_position_encoding
        self.stochastic_sublayer = opt.stochastic_sublayer
        self.post_norm = opt.post_norm

        if self.macaron:
            self.preprocess_mcr_ffn = preprocessing(opt.rezero, opt.model_size, self.post_norm)

            if self.mfw:
                self.mcr_feedforward = MFWPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                                  variational=self.variational,
                                                                  n_languages=opt.n_languages, rank=opt.mfw_rank,
                                                                  use_multiplicative=opt.mfw_multiplicative,
                                                                  no_bias=opt.mfw_no_bias,
                                                                  activation=opt.ffn_activation,
                                                                  glu=opt.ffn_glu)
            else:
                self.mcr_feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                               variational=self.variational,
                                                               activation=opt.ffn_activation,
                                                               glu=opt.ffn_glu,
                                                               dropout_residual=self.post_norm,
                                                               res_dropout=self.residual_dropout)

            # residual dropout is applied here only if the FFN did not
            # already apply it internally (dropout_residual flag)
            self.postprocess_mcr_ffn = postprocessing(opt.rezero, opt.model_size, self.residual_dropout,
                                                      self.variational, self.post_norm,
                                                      dropout_residual=not self.mcr_feedforward.dropout_residual)

        self.preprocess_attn = preprocessing(opt.rezero, opt.model_size, self.post_norm)
        self.postprocess_attn = postprocessing(opt.rezero, opt.model_size, self.residual_dropout,
                                               self.variational, self.post_norm)

        self.preprocess_ffn = preprocessing(opt.rezero, opt.model_size, self.post_norm)
        d_head = opt.model_size // opt.n_heads

        if self.mfw:
            # multilingual factorized weights: per-language low-rank factors
            self.feedforward = MFWPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                          variational=self.variational,
                                                          n_languages=opt.n_languages, rank=opt.mfw_rank,
                                                          use_multiplicative=opt.mfw_multiplicative,
                                                          no_bias=opt.mfw_no_bias,
                                                          activation=opt.ffn_activation,
                                                          glu=opt.ffn_glu)

            self.multihead = MFWRelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
                                                          n_languages=opt.n_languages, rank=opt.mfw_rank,
                                                          use_multiplicative=opt.mfw_multiplicative,
                                                          no_bias=opt.mfw_no_bias, )
        else:
            self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                       variational=self.variational,
                                                       activation=opt.ffn_activation,
                                                       glu=opt.ffn_glu,
                                                       dropout_residual=opt.post_norm,
                                                       res_dropout=self.residual_dropout)

            if not self.rotary_position_encoding:
                self.multihead = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
                                                           learnable_pos=self.learnable_pos,
                                                           max_pos=opt.max_pos_length)
            elif self.rotary_position_encoding:
                self.multihead = SelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
                                                   rotary_pos_enc=True)

        self.postprocess_ffn = postprocessing(opt.rezero, opt.model_size, self.residual_dropout,
                                              self.variational, self.post_norm,
                                              dropout_residual=not self.feedforward.dropout_residual)

    def forward(self, input, pos_emb, attn_mask, src_lang=None,
                incremental=False, incremental_cache=None, mems=None):
        """Run the encoder layer.

        input: tensor [T x B x H]; pos_emb: (relative) position embeddings;
        attn_mask: padding/attention mask; src_lang: language index used by
        the MFW modules. Returns the transformed tensor, plus the
        incremental cache when ``incremental`` is True.
        """
        if incremental and incremental_cache is None:
            incremental_cache = dict()

        # stochastic depth: one coin decides whether this (sub-)layer runs
        coin = True
        if self.training and self.death_rate > 0:
            coin = (torch.rand(1)[0].item() >= self.death_rate)

        if coin:
            if self.macaron:
                out = self.mcr_feedforward(self.preprocess_mcr_ffn(input), src_lang)

                # rescale surviving output to keep expectation unchanged
                if self.training and self.death_rate > 0:
                    ffn_scale = self.ffn_scale / (1 - self.death_rate)
                else:
                    ffn_scale = self.ffn_scale

                input = self.postprocess_mcr_ffn(out * ffn_scale, input)
                # input = fused_dropout_add(out * ffn_scale, input,self.residual_dropout, self.training)

        if self.stochastic_sublayer:  # re-toss-coin
            if self.training and self.death_rate > 0:
                coin = (torch.rand(1)[0].item() >= self.death_rate)

        if coin:
            query = self.preprocess_attn(input)

            if self.mfw:
                out, _ = self.multihead(query, pos_emb, src_lang, attn_mask, None, mems=mems,
                                        incremental=incremental, incremental_cache=incremental_cache)
            else:
                out, _ = self.multihead(query, pos_emb, attn_mask, None, mems=mems,
                                        incremental=incremental, incremental_cache=incremental_cache)

            # rescaling before residual
            if self.training and self.death_rate > 0:
                out = out / (1 - self.death_rate)

            input = self.postprocess_attn(out, input)
            # input = fused_dropout_add(out, input, self.residual_dropout, self.training)

        if self.stochastic_sublayer:  # re-toss-coin
            if self.training and self.death_rate > 0:
                coin = (torch.rand(1)[0].item() >= self.death_rate)

        if coin:
            """ Feed forward layer
                layernorm > ffn > dropout > residual
            """
            out = self.feedforward(self.preprocess_ffn(input), src_lang)

            # rescaling before residual
            if self.training and self.death_rate > 0:
                ffn_scale = self.ffn_scale / (1 - self.death_rate)
            else:
                ffn_scale = self.ffn_scale

            input = self.postprocess_ffn(out * ffn_scale, input)
            # input = fused_dropout_add(out * ffn_scale, input, self.residual_dropout, self.training)

        if incremental:
            return input, incremental_cache

        return input
class RelativeTransformerDecoderLayer(nn.Module):
    """A Transformer decoder layer with relative (or rotary) self-attention.

    Sub-layer order (pre-norm residual blocks):
        [macaron FFN, scaled by 0.5] -> masked self-attention ->
        encoder-decoder attention (unless ``opt.ignore_source``) -> FFN

    Stochastic depth: during training the layer is skipped with probability
    ``death_rate``; with ``opt.stochastic_sublayer`` the drop coin is
    re-tossed before every sub-layer. Surviving sub-layer outputs are
    rescaled by 1/(1 - death_rate).
    """

    def __init__(self, opt, death_rate=0.0):
        # opt: namespace of model hyper-parameters.
        # death_rate: per-layer drop probability for stochastic depth.
        super(RelativeTransformerDecoderLayer, self).__init__()
        self.ignore_source = opt.ignore_source
        self.variational = opt.variational_dropout
        self.death_rate = death_rate
        self.batch_ensemble = opt.batch_ensemble
        self.mfw = opt.multilingual_factorized_weights
        self.macaron = opt.macaron
        # macaron layers sandwich attention between two half-weighted FFNs
        self.ffn_scale = 0.5 if self.macaron else 1
        self.dropout = opt.dropout
        # negative residual/ffn dropout values mean "fall back to opt.dropout"
        self.residual_dropout = opt.residual_dropout if opt.residual_dropout >= 0 else opt.dropout
        self.ffn_dropout = opt.ffn_dropout if opt.ffn_dropout >= 0 else opt.dropout
        self.rezero = opt.rezero
        self.n_heads = opt.n_heads
        self.learnable_pos = opt.learnable_position_encoding
        self.rotary_position_encoding = opt.rotary_position_encoding
        self.stochastic_sublayer = opt.stochastic_sublayer
        self.post_norm = opt.post_norm

        if self.macaron:
            self.preprocess_mcr_ffn = preprocessing(opt.rezero, opt.model_size, self.post_norm)

            if self.mfw:
                self.mcr_feedforward = MFWPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                                  variational=self.variational,
                                                                  n_languages=opt.n_languages, rank=opt.mfw_rank,
                                                                  use_multiplicative=opt.mfw_multiplicative,
                                                                  no_bias=opt.mfw_no_bias,
                                                                  activation=opt.ffn_activation,
                                                                  glu=opt.ffn_glu)
            else:
                self.mcr_feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                               variational=self.variational,
                                                               activation=opt.ffn_activation,
                                                               glu=opt.ffn_glu,
                                                               dropout_residual=opt.post_norm,
                                                               res_dropout=self.residual_dropout)

            # residual dropout applied here only if the FFN did not already
            # apply it internally (dropout_residual flag)
            self.postprocess_mcr_ffn = postprocessing(opt.rezero, opt.model_size, self.residual_dropout,
                                                      self.variational, self.post_norm,
                                                      dropout_residual=not self.mcr_feedforward.dropout_residual)

        self.preprocess_attn = preprocessing(opt.rezero, opt.model_size, self.post_norm)
        self.postprocess_attn = postprocessing(opt.rezero, opt.model_size, self.residual_dropout,
                                               self.variational, self.post_norm)

        if not self.ignore_source:
            self.preprocess_src_attn = preprocessing(opt.rezero, opt.model_size, self.post_norm)
            self.postprocess_src_attn = postprocessing(opt.rezero, opt.model_size, self.residual_dropout,
                                                       self.variational, self.post_norm)

            if not self.mfw:
                self.multihead_src = EncdecMultiheadAttn(opt.n_heads, opt.model_size, opt.attn_dropout)
            else:
                self.multihead_src = MFWEncdecMultiheadAttn(opt.n_heads, opt.model_size, opt.attn_dropout,
                                                            n_languages=opt.n_languages, rank=opt.mfw_rank,
                                                            use_multiplicative=opt.mfw_multiplicative,
                                                            no_bias=opt.mfw_no_bias, )

        self.preprocess_ffn = preprocessing(opt.rezero, opt.model_size, self.post_norm)

        d_head = opt.model_size // opt.n_heads

        if self.mfw:
            self.feedforward = MFWPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                          variational=self.variational,
                                                          n_languages=opt.n_languages, rank=opt.mfw_rank,
                                                          use_multiplicative=opt.mfw_multiplicative,
                                                          no_bias=opt.mfw_no_bias,
                                                          activation=opt.ffn_activation,
                                                          glu=opt.ffn_glu)

            self.multihead_tgt = MFWRelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
                                                              n_languages=opt.n_languages, rank=opt.mfw_rank,
                                                              use_multiplicative=opt.mfw_multiplicative,
                                                              no_bias=opt.mfw_no_bias, )
        else:
            self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                       variational=self.variational,
                                                       activation=opt.ffn_activation,
                                                       glu=opt.ffn_glu,
                                                       dropout_residual=opt.post_norm,
                                                       res_dropout=self.residual_dropout)

            if self.rotary_position_encoding:
                self.multihead_tgt = SelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
                                                       rotary_pos_enc=True)
            else:
                self.multihead_tgt = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
                                                               learnable_pos=self.learnable_pos,
                                                               max_pos=opt.max_pos_length)

        self.postprocess_ffn = postprocessing(opt.rezero, opt.model_size, self.residual_dropout,
                                              self.variational, self.post_norm,
                                              dropout_residual=not self.feedforward.dropout_residual)

    def forward(self, input, context, pos_emb, mask_tgt, mask_src,
                src_lang=None, tgt_lang=None, pos_emb_src=None,
                incremental=False, incremental_cache=None, reuse_source=True, mems=None):
        """ Self attention layer
            layernorm > attn > dropout > residual

        input: [T_tgt x B x H]; context: encoder states [T_src x B x H] or
        None when the source is ignored; mask_tgt/mask_src: attention masks.
        Returns (output, coverage) — plus the incremental cache when one is
        in use.
        """
        if incremental and incremental_cache is None:
            incremental_cache = dict()

        # Initialize coverage up-front so it is always defined at return
        # time even when the cross-attention sub-layer is skipped by
        # stochastic depth.
        coverage = None

        coin = True
        if self.training and self.death_rate > 0:
            coin = (torch.rand(1)[0].item() >= self.death_rate)

        if coin:
            if self.macaron:
                out = self.mcr_feedforward(self.preprocess_mcr_ffn(input), src_lang)

                # rescale surviving output to keep expectation unchanged
                if self.training and self.death_rate > 0:
                    ffn_scale = self.ffn_scale / (1 - self.death_rate)
                else:
                    ffn_scale = self.ffn_scale

                # input = self.postprocess_mcr_ffn(out * ffn_scale, input)
                input = fused_dropout_add(out * ffn_scale, input, self.residual_dropout, self.training)

        if self.stochastic_sublayer:
            if self.training and self.death_rate > 0:
                coin = (torch.rand(1)[0].item() >= self.death_rate)

        if coin:
            # input and context should be T x B x H
            if mems is not None and mems.size(0) > 0:
                mems = self.preprocess_attn(mems)
            else:
                mems = None

            query = self.preprocess_attn(input)

            if self.mfw:
                out, _ = self.multihead_tgt(query, pos_emb, tgt_lang, None, mask_tgt, mems=mems,
                                            incremental=incremental, incremental_cache=incremental_cache)
            else:
                out, _ = self.multihead_tgt(query, pos_emb, None, mask_tgt, mems=mems,
                                            incremental=incremental, incremental_cache=incremental_cache)

            # rescaling before residual
            if self.training and self.death_rate > 0:
                out = out / (1 - self.death_rate)

            # input = self.postprocess_attn(out, input)
            input = fused_dropout_add(out, input, self.residual_dropout, self.training)

        if self.stochastic_sublayer:
            if self.training and self.death_rate > 0:
                coin = (torch.rand(1)[0].item() >= self.death_rate)

        if coin:
            """ Context Attention layer
                layernorm > attn > dropout > residual
            """
            if not self.ignore_source:
                query = self.preprocess_src_attn(input)
                incremental_source = incremental and reuse_source

                if self.mfw:
                    out, coverage = self.multihead_src(query, context, context, src_lang, tgt_lang, mask_src,
                                                       incremental=incremental_source,
                                                       incremental_cache=incremental_cache)
                else:
                    out, coverage = self.multihead_src(query, context, context, mask_src,
                                                       rotary_pos_enc=self.rotary_position_encoding,
                                                       pos_emb_q=pos_emb,
                                                       pos_emb_k=pos_emb_src,
                                                       incremental=incremental_source,
                                                       incremental_cache=incremental_cache)

                # rescaling before residual
                if self.training and self.death_rate > 0:
                    out = out / (1 - self.death_rate)

                # input = self.postprocess_src_attn(out, input)
                input = fused_dropout_add(out, input, self.residual_dropout, self.training)
            else:
                coverage = None
        else:
            # Sub-layer dropped: emit a dummy (all-zero) coverage with the
            # same shape a real one would have: [B x heads x T_tgt x T_src].
            # BUG FIX: the original ternary was inverted —
            # `context.size(0) if context is None else input.size(0)`
            # dereferenced `context` exactly when it was None (crash) and
            # used the target length instead of the source length otherwise.
            coverage = input.new_zeros(input.size(1), self.n_heads,
                                       input.size(0), input.size(0) if context is None else context.size(0))

        if self.stochastic_sublayer:
            if self.training and self.death_rate > 0:
                coin = (torch.rand(1)[0].item() >= self.death_rate)

        if coin:
            """ Feed forward layer
                layernorm > ffn > dropout > residual
            """
            out = self.feedforward(self.preprocess_ffn(input), tgt_lang)

            # rescaling before residual
            if self.training and self.death_rate > 0:
                ffn_scale = self.ffn_scale / (1 - self.death_rate)
            else:
                ffn_scale = self.ffn_scale

            # input = self.postprocess_ffn(out * ffn_scale, input)
            input = fused_dropout_add(out * ffn_scale, input, self.residual_dropout, self.training)

        if incremental_cache is None:
            return input, coverage
        else:
            return input, coverage, incremental_cache
| 21,330 | 50.524155 | 115 | py |
NMTGMinor | NMTGMinor-master/onmt/models/discourse/discourse_transformer.py | # Transformer with discourse information
from collections import defaultdict
import onmt
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformers import Transformer, TransformerDecodingState
from onmt.modules.pre_post_processing import PrePostProcessing
from .gate_layer import RelativeGateEncoderLayer
class DiscourseTransformerEncoder(nn.Module):
    """Encoder that fuses the current segment with the previous ("past")
    segment via a gated cross-attention layer.

    A single shared ``encoder`` encodes both the past and the current input;
    the two representations are then combined by ``gate_layer``.
    """

    def __init__(self, opt, encoder=None):
        # encoder: a pre-built base encoder, shared between past and current
        # segments.
        self.opt = opt
        super(DiscourseTransformerEncoder, self).__init__()

        # a shared encoder for all present, past and future
        self.encoder = encoder
        if hasattr(encoder, 'word_lut'):
            self.word_lut = encoder.word_lut
            from ..multilingual_translator.relative_transformer_layers \
                import RelativeTransformerEncoderLayer
        else:
            from ..speech_recognizer.relative_transformer_layers \
                import RelativeTransformerEncoderLayer

        # NOTE(review): past_layer is constructed here but its application in
        # forward() is commented out — it currently only adds parameters.
        self.past_layer = RelativeTransformerEncoderLayer(self.opt)
        self.input_type = encoder.input_type
        self.time = None  # backward compatible
        self.gate_layer = RelativeGateEncoderLayer(self.opt)

        self.postprocess_layer = PrePostProcessing(opt.model_size, 0.0, sequence='n')

    def forward(self, input, past_input=None, input_lang=None, factorize=False):
        """Encode ``input`` conditioned on ``past_input``.

        Returns a defaultdict with 'context' (gated encoder states),
        'src_mask', 'src' and 'pos_emb'.
        """
        assert past_input is not None

        # the same encoder is used to encode the previous and current segment
        past_encoder_output = self.encoder(past_input, input_lang=input_lang, factorize=factorize)

        #
        past_context = past_encoder_output['context']
        past_pos_emb = past_encoder_output['pos_emb']

        encoder_output = self.encoder(input, input_lang=input_lang, factorize=factorize)
        # past_mask_src = past_input.narrow(2, 0, 1).squeeze(2).transpose(0, 1).eq(onmt.constants.PAD).unsqueeze(0)
        # past_context = self.past_layer(past_context, past_pos_emb, past_mask_src,
        #                                src_lang=input_lang, factorize=factorize)

        current_context = encoder_output['context']
        current_pos_emb = encoder_output['pos_emb']

        # 3-D inputs are feature frames (speech); the padding mask is read
        # from the first feature channel. 2-D inputs are token ids.
        if len(input.size()) > 2:
            mask_src = input.narrow(2, 0, 1).squeeze(2).transpose(0, 1).eq(onmt.constants.PAD).unsqueeze(0)
            past_mask = past_input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
            dec_attn_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
        else:
            mask_src = input.transpose(0, 1).eq(onmt.constants.PAD).unsqueeze(0)
            past_mask = past_input.eq(onmt.constants.PAD).unsqueeze(1)
            dec_attn_mask = input.eq(onmt.constants.PAD).unsqueeze(1)

        # gate between current-segment self-attention and past-segment
        # cross-attention
        context = self.gate_layer(current_context, past_context, current_pos_emb, mask_src, past_mask,
                                  src_lang=input_lang, factorize=factorize)
        # context = current_context

        # final layer norm
        context = self.postprocess_layer(context, factor=input_lang)

        output_dict = defaultdict(lambda: None, {'context': context, 'src_mask': dec_attn_mask,
                                                 'src': input, 'pos_emb': current_pos_emb})

        del past_encoder_output

        return output_dict
class DiscourseTransformer(Transformer):
    """Transformer variant whose encoder additionally consumes the previous
    ("past") source segment as discourse context (see
    DiscourseTransformerEncoder)."""

    def forward(self, batch, target_mask=None, streaming=False, zero_encoder=False,
                mirror=False, streaming_state=None, nce=False, factorize=True, **kwargs):
        """
        :param nce: use noise contrastive estimation
        :param streaming_state:
        :param streaming:
        :param mirror: if using mirror network for future anticipation
        :param batch: data object sent from the dataset
        :param target_mask:
        :param zero_encoder: zero out the encoder output (if necessary)
        :return: defaultdict of outputs ('hidden', 'context', 'logprobs', ...)
        """
        if self.switchout > 0 and self.training:
            batch.switchout(self.switchout, self.src_vocab_size, self.tgt_vocab_size)

        src = batch.get('source')
        tgt = batch.get('target_input')
        src_pos = batch.get('source_pos')
        tgt_pos = batch.get('target_pos')
        src_lang = batch.get('source_lang')
        tgt_lang = batch.get('target_lang')
        src_lengths = batch.src_lengths
        tgt_lengths = batch.tgt_lengths
        # previous source segment providing the discourse context
        past_src = batch.get('past_source')

        org_src = src
        org_tgt = tgt
        src = src.transpose(0, 1)  # transpose to have batch first
        tgt = tgt.transpose(0, 1)
        past_src = past_src.transpose(0, 1)

        # Encoder has to receive different inputs
        encoder_output = self.encoder(src, past_input=past_src, input_lang=src_lang,
                                      factorize=factorize)

        encoder_output = defaultdict(lambda: None, encoder_output)
        context = encoder_output['context']

        # the state is changed
        streaming_state = encoder_output['streaming_state']

        # zero out the encoder part for pre-training
        if zero_encoder:
            context.zero_()

        decoder_output = self.decoder(tgt, context, src,
                                      src_lang=src_lang, tgt_lang=tgt_lang, input_pos=tgt_pos, streaming=streaming,
                                      src_lengths=src_lengths, tgt_lengths=tgt_lengths,
                                      streaming_state=streaming_state, factorize=factorize)

        # update the streaming state again
        decoder_output = defaultdict(lambda: None, decoder_output)
        streaming_state = decoder_output['streaming_state']
        output = decoder_output['hidden']

        # build the output dict based on decoder output
        output_dict = defaultdict(lambda: None, decoder_output)
        output_dict['hidden'] = output
        output_dict['context'] = context
        output_dict['src_mask'] = encoder_output['src_mask']
        output_dict['src'] = src
        output_dict['target_mask'] = target_mask
        output_dict['streaming_state'] = streaming_state
        output_dict['target'] = batch.get('target_output')
        # output_dict['lid_logits'] = decoder_output['lid_logits']

        # final layer: computing softmax
        if self.training and nce:
            output_dict = self.generator[0](output_dict)
        else:
            logprobs = self.generator[0](output_dict)['logits']
            output_dict['logprobs'] = logprobs

        # Mirror network: reverse the target sequence and perform backward language model
        if mirror:
            # tgt_reverse = torch.flip(batch.get('target_input'), (0, ))
            tgt_pos = torch.flip(batch.get('target_pos'), (0,))
            tgt_reverse = torch.flip(batch.get('target'), (0,))
            tgt_reverse_input = tgt_reverse[:-1]
            tgt_reverse_output = tgt_reverse[1:]

            tgt_reverse_input = tgt_reverse_input.transpose(0, 1)
            # perform an additional backward pass
            reverse_decoder_output = self.mirror_decoder(tgt_reverse_input, context, src, src_lang=src_lang,
                                                         tgt_lang=tgt_lang, input_pos=tgt_pos)

            reverse_decoder_output['src'] = src
            reverse_decoder_output['context'] = context
            reverse_decoder_output['target_mask'] = target_mask

            reverse_logprobs = self.mirror_generator[0](reverse_decoder_output)['logits']

            output_dict['reverse_target'] = tgt_reverse_output
            output_dict['reverse_hidden'] = reverse_decoder_output['hidden']
            output_dict['reverse_logprobs'] = reverse_logprobs
            output_dict['target_input'] = batch.get('target_input')
            output_dict['target_lengths'] = batch.tgt_lengths

            # learn weights for mapping (g in the paper)
            output_dict['hidden'] = self.mirror_g(output_dict['hidden'])

        # compute the logits for each encoder step
        if self.ctc:
            output_dict['encoder_logits'] = self.ctc_linear(output_dict['context'])

        del encoder_output
        return output_dict

    def load_encoder_weights(self, pretrained_model):
        """Load a pretrained encoder's weights into the shared base encoder.

        The language embedding is detached on both sides before the
        state-dict copy so that it is neither copied nor overwritten.
        """
        # take the shared encoder section of the encoder
        encoder_ = self.encoder.encoder

        pretrained_model.encoder.language_embedding = None
        enc_language_embedding = encoder_.language_embedding
        encoder_.language_embedding = None
        encoder_state_dict = pretrained_model.encoder.state_dict()

        encoder_.load_state_dict(encoder_state_dict)
        encoder_.language_embedding = enc_language_embedding

    # TODO: override
    def create_decoder_state(self, batch, beam_size=1, type=1, buffering=True, factorize=True, **kwargs):
        """
        Generate a new decoder state based on the batch input
        :param buffering:
        :param streaming:
        :param type:
        :param batch: Batch object (may not contain target during decoding)
        :param beam_size: Size of beam used in beam search
        :return: a TransformerDecodingState for incremental decoding
        """
        src = batch.get('source')
        tgt_atb = batch.get('target_atb')
        src_lang = batch.get('source_lang')
        tgt_lang = batch.get('target_lang')
        past_src = batch.get('past_source')

        src_transposed = src.transpose(0, 1)
        # encoder_output = self.encoder(src_transposed, input_pos=src_pos, input_lang=src_lang)

        encoder_output = self.encoder(src_transposed, past_input=past_src.transpose(0, 1), input_lang=src_lang,
                                      factorize=factorize)

        # The decoding state is still the same?
        print("[INFO] create Transformer decoding state with buffering", buffering)
        decoder_state = TransformerDecodingState(src, tgt_lang, encoder_output['context'], src_lang,
                                                 beam_size=beam_size, model_size=self.model_size,
                                                 type=type, buffering=buffering)

        return decoder_state
NMTGMinor | NMTGMinor-master/onmt/models/discourse/gate_layer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import onmt
from onmt.modules.pre_post_processing import PrePostProcessing
from onmt.modules.linear import FeedForward
from onmt.modules.linear import XavierLinear as Linear
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.dropout import VariationalDropout
from onmt.modules.optimized.encdec_attention import EncdecMultiheadAttn
from onmt.modules.optimized.feed_forward import PositionWiseFeedForward
from onmt.modules.optimized.relative_self_attention import RelativeSelfMultiheadAttn
from onmt.modules.multilingual_factorized.linear import MFWPositionWiseFeedForward
from onmt.modules.multilingual_factorized.encdec_attention import MFWEncdecMultiheadAttn
from onmt.modules.multilingual_factorized.relative_attention import MFWRelativeSelfMultiheadAttn
from onmt.modules.multilingual_partitioned.linear import MPPositionWiseFeedForward
from onmt.modules.multilingual_partitioned.encdec_attention import MPEncdecMultiheadAttn
from onmt.modules.multilingual_partitioned.relative_attention import MPRelativeSelfMultiheadAttn
def preprocessing(rezero, *args, **kwargs):
    """Return a no-op Identity when rezero is enabled (rezero layers skip
    pre-norm), otherwise a PrePostProcessing module built from the given
    arguments."""
    if rezero:
        # BUG FIX: Identity is not imported at this module's top level
        # (unlike relative_transformer_layers.py, which imports it from
        # onmt.modules.identity), so the original code raised NameError
        # whenever rezero was enabled. Import it locally here.
        from onmt.modules.identity import Identity
        return Identity()
    else:
        return PrePostProcessing(*args, **kwargs)
class RelativeGateEncoderLayer(nn.Module):
    """Encoder layer that gates between self-attention over the current
    segment and cross-attention over a past (discourse) context.

    The two attended representations are fused via a learned sigmoid gate:
        output = g * present + (1 - g) * past
    """

    def __init__(self, opt, **kwargs):
        super(RelativeGateEncoderLayer, self).__init__()
        self.variational = opt.variational_dropout
        self.depthwise_conv = opt.depthwise_conv
        self.mfw = opt.multilingual_factorized_weights
        self.mpw = opt.multilingual_partitioned_weights
        self.mln = opt.multilingual_layer_norm
        self.no_ffn = opt.no_ffn
        self.weight_drop = opt.weight_drop
        self.multilingual_adapter = opt.multilingual_adapter
        self.adapter_bottleneck_size = opt.adapter_bottleneck_size
        self.macaron = opt.macaron
        self.ffn_scale = 0.5 if self.macaron else 1
        self.rezero = opt.rezero
        self.learnable_pos = opt.learnable_position_encoding
        # negative residual/ffn dropout values mean "fall back to opt.dropout"
        self.residual_dropout = opt.residual_dropout if opt.residual_dropout >= 0 else opt.dropout
        self.ffn_dropout = opt.ffn_dropout if opt.ffn_dropout >= 0 else opt.dropout

        if self.macaron:
            self.preprocess_mcr_ffn = preprocessing(self.rezero, opt.model_size, 0.0,
                                                    multilingual=self.mln, sequence='n', n_languages=opt.n_languages)
            self.postprocess_mcr_ffn = PrePostProcessing(opt.model_size, self.residual_dropout,
                                                         sequence='dz' if self.rezero else 'da',
                                                         variational=self.variational)

            if self.mfw:
                self.mcr_feedforward = MFWPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                                  variational=self.variational,
                                                                  n_languages=opt.n_languages, rank=opt.mfw_rank,
                                                                  use_multiplicative=opt.mfw_multiplicative,
                                                                  activation=opt.ffn_activation,
                                                                  glu=opt.ffn_glu)
            else:
                self.mcr_feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                               variational=self.variational,
                                                               activation=opt.ffn_activation,
                                                               glu=opt.ffn_glu)

        if self.mfw:
            assert not self.mpw, "[ERROR] factorized and partitioned weights cannot be used at the same time."

        self.preprocess_attn = preprocessing(self.rezero, opt.model_size, 0.0,
                                             multilingual=self.mln, sequence='n', n_languages=opt.n_languages)
        self.postprocess_attn = PrePostProcessing(opt.model_size, self.residual_dropout,
                                                  sequence='dz' if self.rezero else 'da',
                                                  variational=self.variational)

        self.preprocess_src_attn = preprocessing(self.rezero, opt.model_size, 0.0, sequence='n',
                                                 multilingual=self.mln, n_languages=opt.n_languages)
        self.postprocess_src_attn = PrePostProcessing(opt.model_size, self.residual_dropout,
                                                      sequence='dz' if self.rezero else 'da',
                                                      variational=self.variational)

        self.preprocess_ffn = preprocessing(self.rezero, opt.model_size, 0.0,
                                            multilingual=self.mln, sequence='n', n_languages=opt.n_languages)
        self.postprocess_ffn = PrePostProcessing(opt.model_size, self.residual_dropout,
                                                 sequence='dz' if self.rezero else 'da',
                                                 variational=self.variational)

        d_head = opt.model_size // opt.n_heads

        if self.mfw:
            self.feedforward = MFWPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                          variational=self.variational,
                                                          n_languages=opt.n_languages, rank=opt.mfw_rank,
                                                          use_multiplicative=opt.mfw_multiplicative,
                                                          weight_drop=self.weight_drop,
                                                          mfw_activation=opt.mfw_activation,
                                                          activation=opt.ffn_activation,
                                                          glu=opt.ffn_glu)

            self.multihead = MFWRelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
                                                          learnable_pos=self.learnable_pos,
                                                          max_pos=opt.max_pos_length,
                                                          n_languages=opt.n_languages, rank=opt.mfw_rank,
                                                          use_multiplicative=opt.mfw_multiplicative,
                                                          weight_drop=self.weight_drop,
                                                          mfw_activation=opt.mfw_activation)

            self.multihead_src = MFWEncdecMultiheadAttn(opt.n_heads, opt.model_size, opt.attn_dropout,
                                                        n_languages=opt.n_languages, rank=opt.mfw_rank,
                                                        use_multiplicative=opt.mfw_multiplicative,
                                                        weight_drop=self.weight_drop,
                                                        mfw_activation=opt.mfw_activation)

        elif self.mpw:
            # NOTE(review): this branch never creates self.multihead_src,
            # yet forward() calls it unconditionally — confirm whether the
            # mpw configuration is ever used with this layer.
            if not self.no_ffn:
                self.feedforward = MPPositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                             variational=self.variational,
                                                             factor_size=opt.mpw_factor_size)

            self.multihead = MPRelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
                                                         factor_size=opt.mpw_factor_size)

        else:
            self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                       variational=self.variational,
                                                       activation=opt.ffn_activation,
                                                       glu=opt.ffn_glu)

            self.multihead = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout,
                                                       learnable_pos=self.learnable_pos,
                                                       max_pos=opt.max_pos_length)

            self.multihead_src = EncdecMultiheadAttn(opt.n_heads, opt.model_size, opt.attn_dropout)

        if self.depthwise_conv:
            self.preprocess_conv = preprocessing(self.rezero, opt.model_size, 0.0,
                                                 multilingual=self.mln, sequence='n', n_languages=opt.n_languages)
            self.postprocess_conv = PrePostProcessing(opt.model_size, self.residual_dropout,
                                                      sequence='dz' if self.rezero else 'da',
                                                      variational=self.variational)
            # NOTE(review): ConformerConvBlock is not imported in this
            # module — this branch raises NameError if depthwise_conv is
            # enabled; confirm the missing import.
            self.depthwise_conv = ConformerConvBlock(opt.model_size, opt.conv_kernel, bias=True)
        else:
            self.depthwise_conv = None

        # gate over the concatenation [past; present] -> sigmoid in (0, 1)
        self.gate_linear = Linear(2 * opt.model_size, opt.model_size)

        self.preprocess_gate = preprocessing(self.rezero, 2 * opt.model_size, 0.0,
                                             multilingual=self.mln, sequence='n', n_languages=opt.n_languages)

    def forward(self, input, context, pos_emb, attn_mask, context_mask, src_lang=None, factorize=False):
        """
        :param context: discourse context [T_d x B x H]
        :param factorize:
        :param input: tensor [T x B x H]
        :param pos_emb: tensor [T x 1 x H]
        :param attn_mask: tensor [1 x T x B]
        :param context_mask: tensor [1 x T_d x B]
        :param src_lang: tensor [B] or None
        :return: gated and (optionally) FFN-transformed tensor [T x B x H]
        """

        if self.macaron:
            out = self.mcr_feedforward(self.preprocess_mcr_ffn(input), src_lang, factorize=factorize)

            ffn_scale = self.ffn_scale

            input = self.postprocess_mcr_ffn(out * ffn_scale, input)

        """
        Self-attention block
        """
        query = self.preprocess_attn(input, factor=src_lang)

        if self.mfw or self.mpw:
            out, _ = self.multihead(query, pos_emb, src_lang, attn_mask, None, factorize=factorize)
        else:
            out, _ = self.multihead(query, pos_emb, attn_mask, None)

        input_present = self.postprocess_attn(out, input)

        """
        Context attention block
        """
        query = self.preprocess_src_attn(input, factor=src_lang)
        if self.mfw or self.mpw:
            out, _ = self.multihead_src(query, context, context, src_lang, src_lang, context_mask,
                                        factorize=factorize)
        else:
            out, _ = self.multihead_src(query, context, context, context_mask)

        input_past = self.postprocess_src_attn(out, input)

        """
        Gate
        """
        gate_input = self.preprocess_gate(torch.cat([input_past, input_present], dim=-1))
        gate = torch.sigmoid(self.gate_linear(gate_input))

        # convex combination of the two attended representations
        input = gate * input_present + (1 - gate) * input_past

        """
        Feed forward layer 
        """
        if not self.no_ffn:
            out = self.feedforward(self.preprocess_ffn(input, factor=src_lang), src_lang, factorize=factorize)

            # rescaling before residual
            ffn_scale = self.ffn_scale

            input = self.postprocess_ffn(out * ffn_scale, input)

        if self.multilingual_adapter:
            # NOTE(review): self.adapters is never defined in this class —
            # this branch raises AttributeError if multilingual_adapter is
            # enabled; confirm where adapters are expected to be attached.
            input = self.adapters(input, src_lang)

        return input
| 11,711 | 51.756757 | 117 | py |
NMTGMinor | NMTGMinor-master/onmt/models/deltalm/transformer_decoder.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Any, Dict, List, Optional
from collections import defaultdict
import torch
import torch.nn as nn
from .modules.positional_embeddings import PositionalEmbedding, SinusoidalPositionalEmbedding
from .modules.layer_drop import LayerDropModuleList
from onmt.modules.layer_norm import LayerNorm
from .modules.transformer_layer import TransformerDecoderLayerBase
from torch import Tensor
from pretrain_module.modeling_mbart import index_copy
import numpy as np
class TransformerDecoderBase(nn.Module):
"""
Transformer decoder consisting of *cfg.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
    def __init__(
        self,
        cfg,
        embed_tokens,
        no_encoder_attn=False,
        output_projection=None,
        opt=None
    ):
        # Plain (non-module) attributes set before super().__init__() —
        # nn.Module.__setattr__ tolerates this for non-Parameter/Module
        # values.
        self.adapter = None
        self.cfg = cfg
        super(TransformerDecoderBase, self).__init__()
        self.register_buffer("version", torch.Tensor([3]))

        self.dropout_module = nn.Dropout(
            cfg.dropout
        )
        self.decoder_layerdrop = cfg.decoder_layerdrop
        self.share_input_output_embed = cfg.share_decoder_input_output_embed

        input_embed_dim = embed_tokens.embedding_dim
        embed_dim = cfg.decoder_embed_dim
        self.embed_dim = embed_dim

        self.padding_idx = embed_tokens.padding_idx
        print("Decoder padding idx:", self.padding_idx)
        self.max_target_positions = cfg.max_target_positions

        self.embed_tokens = embed_tokens

        # embeddings are scaled by sqrt(d_model) unless disabled
        self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim)

        # project the token embedding up/down when its dim differs from the
        # decoder's model dim
        self.project_in_dim = (
            torch.nn.Linear(input_embed_dim, embed_dim, bias=False)
            if embed_dim != input_embed_dim
            else None
        )

        self.embed_positions = (
            PositionalEmbedding(
                self.max_target_positions,
                embed_dim,
                self.padding_idx,
                learned=cfg.decoder_learned_pos,
            )
            if not cfg.no_token_positional_embeddings
            else None
        )

        self.checkpoint_activations = cfg.checkpoint_activations

        if cfg.layernorm_embedding:
            self.layernorm_embedding = LayerNorm(embed_dim)
        else:
            self.layernorm_embedding = None

        self.cross_self_attention = cfg.cross_self_attention

        # LayerDropModuleList stochastically skips whole layers at train time
        if self.decoder_layerdrop > 0.0:
            self.layers = LayerDropModuleList(p=self.decoder_layerdrop)
        else:
            self.layers = nn.ModuleList([])
        self.layers.extend(
            [
                self.build_decoder_layer(cfg, no_encoder_attn)
                for _ in range(cfg.decoder_layers)
            ]
        )
        self.num_layers = len(self.layers)

        # final layer-norm only for pre-norm decoders
        if cfg.decoder_normalize_before and not cfg.no_decoder_final_norm:
            self.layer_norm = LayerNorm(embed_dim)
        else:
            self.layer_norm = None

        from onmt.modules.optimized.flash_mha import flash_bert_mha
        self.fast_bert_mha = flash_bert_mha

        self.n_languages = 0
        # for layer in self.layers:
        #     layer.add_adapters(opt.n_languages, adapter_location=opt.decoder_adapter)
def build_decoder_layer(self, cfg, no_encoder_attn=False):
layer = transformer_layer.TransformerDecoderLayerBase(cfg, no_encoder_attn)
# removed checkpoint and fsdp
return layer
def add_adapters(self, n_languages):
from .modules.efficient_adapters import EfficientAdapter
self.adapter = EfficientAdapter(n_languages * self.num_layers,
self.embed_dim, self.embed_dim // 4)
self.n_languages = n_languages
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        checkpointing_ffn=False,
        checkpointing_self_attn=False,
        checkpointing_cross_attn=False,
        lang=None,
        **kwargs,
    ):
        """Run the full (teacher-forced) decoder pass.

        Args:
            input_ids: target token ids, shape [B x T].
            attention_mask: target padding mask, [B x T] with 1 = pad
                (lengths are computed as ``(1 - mask).sum`` below).
            encoder_hidden_states: encoder output in [T x B x H] layout
                (it is transposed to [B x T x H] before unpadding).
            encoder_attention_mask: source padding mask, [B x T], 1 = pad.
            checkpointing_*: gradient-checkpointing flags forwarded to layers.
            lang: language id; required when adapters are attached.

        Returns:
            (x, attns): decoder states in [T x B x H] and the per-layer
            attention list.
        """
        bsz, qlen = input_ids.size()
        klen = encoder_hidden_states.size(0)  # NOTE: currently unused
        # embed positions
        positions = None
        if self.embed_positions is not None:
            positions = self.embed_positions(
                input_ids, incremental_state=None
            )
        # embed tokens and positions
        x = self.embed_scale * self.embed_tokens(input_ids)
        if self.project_in_dim is not None:
            x = self.project_in_dim(x)
        if positions is not None:
            x += positions
        if self.layernorm_embedding is not None:
            x = self.layernorm_embedding(x)
        x = self.dropout_module(x)
        # B x T x C -> T x B x C
        # Fast path only under autocast and when the flash kernel is available.
        can_run_fast_bert_mha = False
        if self.fast_bert_mha is not None and torch.is_autocast_enabled():
            can_run_fast_bert_mha = True
            # unpadding x: flatten [B x T x C] to [total_tokens x C], keeping
            # only non-pad positions
            if attention_mask is None:
                padding_mask = input_ids.new_zeros(bsz, qlen)
            else:
                padding_mask = attention_mask
            padding_mask = padding_mask.contiguous().long()
            lengths = (1 - padding_mask).sum(dim=1)
            lengths = lengths.cpu().tolist()  # list of lengths for B seqs
            x = x.view(-1, x.size(-1))
            non_pad_indices = torch.nonzero(padding_mask.view(-1).ne(1)).squeeze(1)
            x = x.index_select(0, non_pad_indices)
            max_len = max(lengths)
            # cumulative sequence lengths (required input for fmha)
            a = torch.tensor(np.array([0] + lengths), dtype=torch.int32)
            cu_seqlens = torch.cumsum(a, 0).to(dtype=torch.int32, device=x.device)
            non_pad_indices_q = non_pad_indices
            # unpadding context
            # transposing from [T x B x H] to [B x T x H]
            encoder_hidden_states = encoder_hidden_states.transpose(0, 1).contiguous()
            padding_mask = encoder_attention_mask
            if padding_mask is None:
                context_len = encoder_hidden_states.size(1)
                padding_mask = input_ids.new_zeros(bsz, context_len)
            padding_mask = padding_mask.long()
            lengths = (1 - padding_mask).sum(dim=1)
            lengths = lengths.cpu().tolist()  # list of lengths for B seqs
            encoder_hidden_states = encoder_hidden_states.view(-1, encoder_hidden_states.size(-1))
            non_pad_indices_kv = torch.nonzero(padding_mask.view(-1).ne(1)).squeeze(1)
            encoder_hidden_states = encoder_hidden_states.index_select(0, non_pad_indices_kv)
            max_len_kv = max(lengths)
            # cumulative sequence lengths (required input for fmha)
            a = torch.tensor(np.array([0] + lengths), dtype=torch.int32)
            cu_seqlens_kv = torch.cumsum(a, 0).to(dtype=torch.int32, device=encoder_hidden_states.device)
            # NOTE(review): self_attn_mask is None on this path — presumably
            # the flash kernel applies causal masking internally; confirm.
            self_attn_mask = None
        else:
            # slow path: keep padded tensors, switch to [T x B x C] layout
            x = x.transpose(0, 1).contiguous()
            max_len, cu_seqlens = None, None
            max_len_kv, cu_seqlens_kv = None, None
            # causal masking.
            self_attn_mask = torch.triu(
                x.new_ones(qlen, qlen), diagonal=1).bool()
            non_pad_indices_q, non_pad_indices_kv = None, None
        self_attn_padding_mask: Optional[Tensor] = None
        # decoder layers
        attns = list()
        for idx, layer in enumerate(self.layers):
            x, layer_attn, _ = layer(
                x,
                encoder_hidden_states,
                encoder_attention_mask,
                self_attn_mask=self_attn_mask,
                self_attn_padding_mask=self_attn_padding_mask,
                checkpointing_ffn=checkpointing_ffn,
                checkpointing_self_attn=checkpointing_self_attn,
                checkpointing_cross_attn=checkpointing_cross_attn,
                max_len=max_len, cu_seqlens=cu_seqlens,
                max_len_kv=max_len_kv, cu_seqlens_kv=cu_seqlens_kv,
            )
            # run through the adapter (one adapter module per layer+language)
            if self.adapter is not None:
                assert lang is not None
                adapter_id = self.adapter.num_modules // self.num_layers * idx + lang
                x = self.adapter(x, adapter_id)
            attns.append(layer_attn)
        if self.layer_norm is not None:
            x = self.layer_norm(x)
        if can_run_fast_bert_mha:
            # re-pad: scatter non-pad rows back into a [B*T x C] buffer,
            # then reshape to the [T x B x C] layout of the slow path
            seq_len = qlen
            x = index_copy(x, non_pad_indices_q, bsz * seq_len)
            x = x.view(bsz, seq_len, -1).transpose(0, 1).contiguous()
        return x, attns
    def step(self, input, decoder_state, **kwargs):
        """One decoding step (or a prefix/batched scoring pass) at inference.

        Args:
            input: target prefix token ids, [B x T].
            decoder_state: holds the encoder context ([T x B x H]), source
                mask, incremental attention buffers and decoding flags.

        Returns:
            dict with 'hidden' (states of the last time step, [1 x B x H]),
            'coverage' (last layer attention) and 'context'.
        """
        # context is stored in the decoder state in [T B H] format
        encoder_hidden_states = decoder_state.context
        encoder_attention_mask = decoder_state.src_mask
        buffers = decoder_state.attention_buffers
        lang = decoder_state.tgt_lang
        atb = decoder_state.tgt_atb
        src_lang = decoder_state.src_lang
        buffering = decoder_state.buffering
        input_ids = input
        input_shape = input_ids.size()
        time_step = input.size(1)
        input_ = input
        # embed positions
        positions = None
        if self.embed_positions is not None:
            positions = self.embed_positions(
                input_ids, incremental_state=None
            )
        x = self.embed_scale * self.embed_tokens(input_ids)
        if self.project_in_dim is not None:
            x = self.project_in_dim(x)
        if positions is not None:
            x += positions
        bsz, qlen = x.size(0), x.size(1)
        using_buffer = (x.size(1) > 1 and len(buffers) > 0)
        if buffering:
            # use the last value of input to continue decoding
            if using_buffer:
                # if buffers have been initialized and we have > 1 input length
                # then it's a continuation step: only the newest token is fed
                x = x[:, -1:, :]
        if self.layernorm_embedding is not None:
            x = self.layernorm_embedding(x)
        # fast path is incompatible with incremental buffering
        can_run_fast_bert_mha = False
        if self.fast_bert_mha is not None and (torch.is_autocast_enabled() or x.dtype == torch.half) and not buffering:
            can_run_fast_bert_mha = True
            # unpadding x (all-zero mask: every target position is kept)
            padding_mask = input_ids.new_zeros(bsz, qlen)
            padding_mask = padding_mask.contiguous().long()
            lengths = (1 - padding_mask).sum(dim=1)
            lengths = lengths.cpu().tolist()  # list of lengths for B seqs
            x = x.view(-1, x.size(-1))
            non_pad_indices = torch.nonzero(padding_mask.view(-1).ne(1)).squeeze(1)
            x = x.index_select(0, non_pad_indices)
            max_len = max(lengths)
            # cumulative sequence lengths (required input for fmha)
            a = torch.tensor(np.array([0] + lengths), dtype=torch.int32)
            cu_seqlens = torch.cumsum(a, 0).to(dtype=torch.int32, device=x.device)
            non_pad_indices_q = non_pad_indices
            # unpadding context
            # transposing from [T x B x H] to [B x T x H]
            encoder_hidden_states = encoder_hidden_states.transpose(0, 1).contiguous()
            padding_mask = encoder_attention_mask
            if padding_mask is None:
                context_len = encoder_hidden_states.size(1)
                padding_mask = input_ids.new_zeros(bsz, context_len)
            padding_mask = padding_mask.long()
            lengths = (1 - padding_mask).sum(dim=1)
            lengths = lengths.cpu().tolist()  # list of lengths for B seqs
            encoder_hidden_states = encoder_hidden_states.view(-1, encoder_hidden_states.size(-1))
            non_pad_indices_kv = torch.nonzero(padding_mask.view(-1).ne(1)).squeeze(1)
            encoder_hidden_states = encoder_hidden_states.index_select(0, non_pad_indices_kv)
            max_len_kv = max(lengths)
            # cumulative sequence lengths (required input for fmha)
            a = torch.tensor(np.array([0] + lengths), dtype=torch.int32)
            cu_seqlens_kv = torch.cumsum(a, 0).to(dtype=torch.int32, device=encoder_hidden_states.device)
            self_attn_mask = None
        else:
            non_pad_indices_q, non_pad_indices_kv = None, None
            # B x T x C -> T x B x C
            x = x.transpose(0, 1).contiguous()
            max_len = None
            cu_seqlens = None
            max_len_kv = None
            cu_seqlens_kv = None
            # causal masking.
            self_attn_mask = torch.triu(
                x.new_ones(qlen, qlen), diagonal=1).bool()
            # when continuing from buffers only the newest row is needed
            if buffering and using_buffer:
                self_attn_mask = self_attn_mask[-1:, :]
        # decoder layers
        attns = list()
        for idx, layer in enumerate(self.layers):
            if buffering:
                buffer = buffers[idx] if idx in buffers else None
            else:
                buffer = None
            x, layer_attn, buffer = layer(
                x,
                encoder_hidden_states,
                encoder_attention_mask,
                self_attn_mask=self_attn_mask,
                self_attn_padding_mask=None,
                max_len = max_len, cu_seqlens = cu_seqlens,
                max_len_kv = max_len_kv, cu_seqlens_kv = cu_seqlens_kv,
                incremental=buffering, incremental_cache=buffer,
            )
            if buffering:
                decoder_state.update_attention_buffer(buffer, idx)
            attns.append(layer_attn)
        if self.layer_norm is not None:
            x = self.layer_norm(x)
        if can_run_fast_bert_mha:
            # re-pad and restore the [T x B x C] layout
            seq_len = qlen
            x = index_copy(x, non_pad_indices_q, bsz * seq_len)
            x = x.view(bsz, seq_len, -1).transpose(0, 1).contiguous()
        output = x[-1].unsqueeze(0)
        coverage = attns[-1]
        if coverage is None:
            # NOTE(review): seq_len is only bound on the fast-attention path
            # above; reaching this line on the slow path would raise
            # NameError — confirm this fallback is only hit on the fast path.
            coverage = output.new_zeros(bsz, seq_len, seq_len)
        output_dict = defaultdict(lambda: None)
        output_dict['hidden'] = output
        output_dict['coverage'] = coverage
        output_dict['context'] = encoder_hidden_states
        return output_dict
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
# self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround.
if (
self._future_mask.size(0) == 0
or (not self._future_mask.device == tensor.device)
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1
)
self._future_mask = self._future_mask.to(tensor)
return self._future_mask[:dim, :dim] | 15,616 | 35.832547 | 119 | py |
import math
from typing import Dict, List, Optional
import torch
import torch.nn as nn
# from fairseq.modules import (
# FairseqDropout,
# LayerDropModuleList,
# LayerNorm,
# PositionalEmbedding,
# SinusoidalPositionalEmbedding,
# )
from .modules.positional_embeddings import PositionalEmbedding, SinusoidalPositionalEmbedding
from .modules.layer_drop import LayerDropModuleList
from onmt.modules.layer_norm import LayerNorm
from .modules.transformer_layer import TransformerEncoderLayerBase
from pretrain_module.modeling_mbart import index_copy
import numpy as np
class TransformerEncoderBase(nn.Module):
    """
    Transformer encoder consisting of *cfg.encoder_layers* layers. Each layer
    is a :class:`TransformerEncoderLayerBase`.

    Args:
        cfg (argparse.Namespace): parsed command-line arguments / model config
        embed_tokens (torch.nn.Embedding): input embedding

    Fix: removed dataset-extraction residue (``| 7894 | ... | py |``) that was
    fused onto the final return statement of ``forward_scriptable`` and broke
    the syntax; dead commented-out code removed.
    """

    def __init__(self, cfg, embed_tokens):
        self.cfg = cfg
        super(TransformerEncoderBase, self).__init__()
        # version buffer kept for checkpoint compatibility with fairseq
        self.register_buffer("version", torch.Tensor([3]))
        self.dropout_module = nn.Dropout(cfg.dropout)
        self.encoder_layerdrop = cfg.encoder_layerdrop
        embed_dim = embed_tokens.embedding_dim
        self.padding_idx = embed_tokens.padding_idx
        self.max_source_positions = cfg.max_source_positions
        self.embed_tokens = embed_tokens
        # scale embeddings by sqrt(d) unless disabled (standard Transformer)
        self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim)
        if not cfg.no_token_positional_embeddings:
            self.embed_positions = (
                PositionalEmbedding(
                    cfg.max_source_positions,
                    embed_dim,
                    self.padding_idx,
                    learned=cfg.encoder_learned_pos,
                )
            )
        else:
            self.embed_positions = None
        if cfg.layernorm_embedding:
            self.layernorm_embedding = LayerNorm(embed_dim)
        else:
            self.layernorm_embedding = None
        # LayerDrop: stochastically skip whole layers during training
        if self.encoder_layerdrop > 0.0:
            self.layers = LayerDropModuleList(p=self.encoder_layerdrop)
        else:
            self.layers = nn.ModuleList([])
        self.layers.extend(
            [self.build_encoder_layer(cfg) for i in range(cfg.encoder_layers)]
        )
        self.num_layers = len(self.layers)
        # final layer norm only for pre-norm models
        if cfg.encoder_normalize_before:
            self.layer_norm = LayerNorm(embed_dim)
        else:
            self.layer_norm = None
        # flash_bert_mha is None when the fused kernel is unavailable
        from onmt.modules.optimized.flash_mha import flash_bert_mha
        self.fast_bert_mha = flash_bert_mha
        self.n_languages = -1
        self.has_adapter = False

    def build_encoder_layer(self, cfg):
        """Construct one encoder layer (checkpointing/FSDP wrapping removed)."""
        layer = TransformerEncoderLayerBase(cfg)
        return layer

    def forward_embedding(
        self, src_tokens, token_embedding: Optional[torch.Tensor] = None
    ):
        """Embed tokens (+positions), apply embedding LayerNorm and dropout.

        Returns both the processed embedding ``x`` and the raw (pre-dropout,
        pre-LayerNorm) embedding.
        """
        # embed tokens and positions
        if token_embedding is None:
            token_embedding = self.embed_tokens(src_tokens)
        x = embed = self.embed_scale * token_embedding
        if self.embed_positions is not None:
            x = embed + self.embed_positions(src_tokens)
        if self.layernorm_embedding is not None:
            x = self.layernorm_embedding(x)
        x = self.dropout_module(x)
        return x, embed

    def forward(
        self,
        src_tokens,
        src_mask: Optional[torch.Tensor] = None,
        return_all_hiddens: bool = False,
        token_embeddings: Optional[torch.Tensor] = None,
    ):
        return self.forward_scriptable(
            src_tokens, src_mask, return_all_hiddens, token_embeddings
        )

    # TorchScript doesn't support super() method so that the scriptable Subclass
    # can't access the base class model in Torchscript.
    # Current workaround is to add a helper function with different name and
    # call the helper function from scriptable Subclass.
    def forward_scriptable(
        self,
        src_tokens,
        src_mask: Optional[torch.Tensor] = None,
        return_all_hiddens: bool = False,
        token_embeddings: Optional[torch.Tensor] = None,
    ):
        """Encode ``src_tokens`` ([B x T]) into states.

        Returns:
            (x, encoder_padding_mask, encoder_embedding, encoder_states)
            with ``x`` in [T x B x C] layout.
        """
        # compute padding mask (1/True = pad)
        if src_mask is None:
            encoder_padding_mask = src_tokens.eq(self.padding_idx)
        else:
            encoder_padding_mask = src_mask
        has_pads = src_tokens.device.type == "xla" or encoder_padding_mask.any()
        x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings)
        # account for padding while computing the representation
        if has_pads:
            x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x))
        can_run_fast_bert_mha = False
        # check if fast bert mha can be run (only under autocast)
        seq_len = x.size(1)
        bsz = x.size(0)
        if self.fast_bert_mha and torch.is_autocast_enabled():
            can_run_fast_bert_mha = True
            padding_mask = encoder_padding_mask  # [B x T]
            # masked positions = 1 so to compute length we need the (1 -)
            if padding_mask is None:
                padding_mask = x.new_zeros(bsz, seq_len)
            padding_mask = padding_mask.long()
            lengths = (1 - padding_mask).sum(dim=1)
            lengths = lengths.cpu().tolist()  # list of lengths for B seqs
            # unpad: flatten [B x T x C] to [total_tokens x C]
            x = x.view(-1, x.size(-1))
            non_pad_indices = torch.nonzero(padding_mask.view(-1).ne(1)).squeeze(1)
            x = x.index_select(0, non_pad_indices)
            max_len = max(lengths)
            # cumulative sequence lengths (required input for fmha)
            a = torch.tensor(np.array([0] + lengths), dtype=torch.int32)
            cu_seqlens = torch.cumsum(a, 0).to(dtype=torch.int32, device=x.device)
        else:
            max_len = -1
            cu_seqlens = None
            non_pad_indices = None
            # B x T x C -> T x B x C
            x = x.transpose(0, 1)
        encoder_states = []
        if return_all_hiddens:
            encoder_states.append(x)
        # encoder layers
        for layer in self.layers:
            x = layer(
                x, encoder_padding_mask=encoder_padding_mask if has_pads else None,
                max_len=max_len, cu_seqlens=cu_seqlens
            )
            if return_all_hiddens:
                assert encoder_states is not None
                encoder_states.append(x)
        if self.layer_norm is not None:
            x = self.layer_norm(x)
        # NOTE: computed for interface compatibility; currently unused below
        src_lengths = src_tokens.ne(self.padding_idx).sum(dim=1, dtype=torch.int32).reshape(-1, 1).contiguous()
        if can_run_fast_bert_mha:
            # re-pad: scatter non-pad rows back, restore [T x B x C] layout
            x = index_copy(x, non_pad_indices, bsz * seq_len)
            x = x.view(bsz, seq_len, -1)
            x = x.transpose(0, 1).contiguous()
        return x, encoder_padding_mask, encoder_embedding, encoder_states
import os
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from torch import Tensor
from .transformer_encoder import TransformerEncoderBase
from .transformer_decoder import TransformerDecoderBase
from .modules.transformer_layer import TransformerDecoderLayerBase
from .modules.utils import get_activation_fn
from onmt.modules.layer_norm import LayerNorm
from .modules.multihead_attention import MultiHeadAttention
from onmt.modules.optimized.dropout_add import fused_dropout_add
from pretrain_module.modeling_mbart import index_copy
def dropout_residual_connection(x, residual, dropout_module, is_training):
    # Thin wrapper: applies dropout to x (at dropout_module's rate) and adds
    # the residual, delegating to the TorchScript-compiled helper below.
    return dropout_add_jit(x, residual, dropout_module.p, is_training)
@torch.jit.script
def dropout_add_jit(x, residual, prob, is_training):
    # type: (Tensor, Tensor, float, bool) -> Tensor
    # TorchScript helper: dropout(x, prob) followed by a residual add.
    dropped = torch.nn.functional.dropout(x, p=prob, training=is_training)
    return residual + dropped
def linear_act_linear(x, fc1, fc2, prob, is_training, activation_func):
    """Position-wise feed-forward: fc2(dropout(activation(fc1(x))))."""
    hidden = activation_func(fc1(x))
    hidden = torch.nn.functional.dropout(hidden, p=prob, training=is_training)
    return fc2(hidden)
def upgrade_state_dict_for_deltalm(
    state_dict: Dict[str, Any], pretrained_deltalm_checkpoint: str, is_encoder=True,
) -> Dict[str, Any]:
    """Load a pretrained DeltaLM checkpoint and remap its keys onto this model.

    Args:
        state_dict: the current model's state dict; matching entries are
            overwritten in place and the dict is returned.
        pretrained_deltalm_checkpoint: path to the checkpoint file.
        is_encoder: select encoder-side keys when True, decoder-side otherwise.

    Raises:
        IOError: if the checkpoint path does not exist.
    """
    if not os.path.exists(pretrained_deltalm_checkpoint):
        raise IOError("Model file not found: {}".format(pretrained_deltalm_checkpoint))
    with open(pretrained_deltalm_checkpoint, "rb") as f:
        state = torch.load(f, map_location=torch.device("cpu"))
    # checkpoints store parameters under 'weights', 'model', or at top level
    if 'weights' in state:
        deltalm_state_dict = state['weights']
    elif 'model' in state:
        deltalm_state_dict = state['model']
    else:
        deltalm_state_dict = state
    # keep only the side we need, stripping the 'encoder.'/'decoder.' and
    # embedding prefixes
    new_deltalm_state_dict = {}
    for key in deltalm_state_dict.keys():
        if is_encoder:
            if key.startswith('encoder.') or key.startswith('src_embedding.'):
                new_key = key.replace('encoder.', '')
                new_key = new_key.replace('src_embedding.', '')
                new_deltalm_state_dict[new_key] = deltalm_state_dict[key]
        else:
            if key.startswith('decoder.') or key.startswith('tgt_embedding.'):
                new_key = key.replace('decoder.', '')
                new_key = new_key.replace('tgt_embedding.', '')
                new_deltalm_state_dict[new_key] = deltalm_state_dict[key]
    deltalm_state_dict = new_deltalm_state_dict
    for key in deltalm_state_dict.keys():
        # the output projection is not transferred
        if "output_projection" in key:
            continue
        # rename DeltaLM's interleaved-FFN keys to this model's naming
        # (ffn_1 -> fc3/fc4, ffn_2/ffn flattened, emb_layer_norm renamed)
        map_key = key
        map_key = map_key.replace('.ffn_1.fc1', '.fc3')
        map_key = map_key.replace('.ffn_1.fc2', '.fc4')
        map_key = map_key.replace('.ffn_2', '')
        map_key = map_key.replace('.ffn.', '.')
        map_key = map_key.replace('emb_layer_norm', 'layernorm_embedding')
        assert map_key in state_dict, map_key
        if 'embed_positions' in key or 'embed_tokens' in key:
            # tables may differ in the first dimension (positions/vocab);
            # copy the overlapping rows only
            left_size = state_dict[map_key].size(0)
            right_size = deltalm_state_dict[key].size(0)
            if left_size <= right_size:
                state_dict[map_key] = deltalm_state_dict[key][:left_size]
            else:
                state_dict[map_key][:right_size] = deltalm_state_dict[key]
        else:
            state_dict[map_key] = deltalm_state_dict[key]
    return state_dict
class DeltaLMEncoder(TransformerEncoderBase):
    """Transformer encoder that can initialise its weights from a
    pretrained DeltaLM checkpoint (``args.pretrained_deltalm_checkpoint``)."""

    def __init__(self, args, embed_tokens, opt=None):
        super().__init__(args, embed_tokens)
        if opt is not None:
            print("Overriding dropout values for DeltaLM....")
            # NOTE(review): these overrides use decoder-side option names and
            # run *after* super().__init__ built the layers — confirm intent.
            args.decoder_layerdrop = opt.death_rate_decoder
            args.activation_dropout = opt.ffn_dropout
        if getattr(args, "pretrained_deltalm_checkpoint", "") != "":
            self_state_dict = self.state_dict()
            deltalm_loaded_state_dict = upgrade_state_dict_for_deltalm(
                state_dict=self_state_dict,
                pretrained_deltalm_checkpoint=args.pretrained_deltalm_checkpoint,
                is_encoder=True,
            )
            # report key mismatches in both directions before the strict load
            for key in self_state_dict:
                if key not in deltalm_loaded_state_dict:
                    print("Warning: key %s not found in pretrained dictionary." % key)
            for key in deltalm_loaded_state_dict:
                if key not in self_state_dict:
                    print("Warning: key %s in pretrained dictionary not found in current model." % key)
            self.load_state_dict(deltalm_loaded_state_dict, strict=True)
            print("Load DeltaLM's encoder from {0}".format(args.pretrained_deltalm_checkpoint))
class DeltaLMDecoder(TransformerDecoderBase):
    """Transformer decoder built from :class:`DeltaLMDecoderLayer` that can
    initialise its weights from a pretrained DeltaLM checkpoint and
    optionally attach per-language adapters."""

    def __init__(self, args, embed_tokens, no_encoder_attn=False, opt=None):
        if opt is not None:
            print("Overriding dropout values for DeltaLM....")
            args.decoder_layerdrop = opt.death_rate_decoder
            args.activation_dropout = opt.ffn_dropout
        super().__init__(args, embed_tokens, no_encoder_attn, opt)
        if getattr(args, "pretrained_deltalm_checkpoint", "") != "":
            # non-strict load: missing/extra keys are tolerated on this side
            deltalm_loaded_state_dict = upgrade_state_dict_for_deltalm(
                state_dict=self.state_dict(),
                pretrained_deltalm_checkpoint=args.pretrained_deltalm_checkpoint,
                is_encoder=False,
            )
            self.load_state_dict(deltalm_loaded_state_dict, strict=False)
            print("Load DeltaLM's decoder from {0}".format(args.pretrained_deltalm_checkpoint))
        self.model_size = args.decoder_embed_dim
        self.switchout = 0.0
        self.adapter = None
        if opt is not None and opt.decoder_adapter > 0:
            print("[INFO] Adding MBART Adapters for %d languages" % opt.n_languages)
            self.add_adapters(opt.n_languages)

    def build_decoder_layer(self, args, no_encoder_attn=False):
        # override: use the DeltaLM layer (two FFN sub-layers) instead of the
        # base transformer layer
        layer = DeltaLMDecoderLayer(args, no_encoder_attn)
        return layer
class DeltaLMDecoderLayer(TransformerDecoderLayerBase):
    """DeltaLM decoder layer with an *interleaved* structure.

    Unlike a vanilla decoder layer, each DeltaLM layer has TWO feed-forward
    sub-layers: self-attn -> FFN(fc3/fc4, ffn_layer_norm) -> cross-attn ->
    FFN(fc1/fc2, final_layer_norm). When available, fused MLP kernels are
    used for both FFN blocks.
    """

    def __init__(
        self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False
    ):
        # deliberately skips TransformerDecoderLayerBase.__init__ and calls
        # its parent (nn.Module) directly, then rebuilds all sub-modules here
        super(TransformerDecoderLayerBase, self).__init__()
        self.embed_dim = args.decoder_embed_dim
        self.dropout_module = nn.Dropout(
            args.dropout
        )
        self.cross_self_attention = getattr(args, "cross_self_attention", False)
        self.self_attn = self.build_self_attention(
            self.embed_dim,
            args,
            add_bias_kv=add_bias_kv,
            add_zero_attn=add_zero_attn,
        )
        self.activation_fn = get_activation_fn(
            activation=str(args.activation_fn)
            if getattr(args, "activation_fn", None) is not None
            else "relu"
        )
        activation_dropout_p = getattr(args, "activation_dropout", 0) or 0
        if activation_dropout_p == 0:
            # for backwards compatibility with models that use args.relu_dropout
            activation_dropout_p = getattr(args, "relu_dropout", 0) or 0
        self.activation_dropout_module = nn.Dropout(
            float(activation_dropout_p)
        )
        self.normalize_before = args.decoder_normalize_before
        self.self_attn_layer_norm = LayerNorm(self.embed_dim)
        if no_encoder_attn:
            self.encoder_attn = None
            self.encoder_attn_layer_norm = None
        else:
            self.encoder_attn = self.build_encoder_attention(self.embed_dim, args)
            self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
        # fc1/fc2: second FFN block (after cross-attention)
        self.fc1 = self.build_fc1(
            self.embed_dim,
            args.decoder_ffn_embed_dim
        )
        self.fc2 = self.build_fc2(
            args.decoder_ffn_embed_dim,
            self.embed_dim
        )
        # fc3/fc4: first FFN block (after self-attention)
        self.fc3 = self.build_fc1(
            self.embed_dim,
            args.decoder_ffn_embed_dim
        )
        self.fc4 = self.build_fc2(
            args.decoder_ffn_embed_dim,
            self.embed_dim
        )
        self.ffn_layer_norm = LayerNorm(self.embed_dim)
        self.final_layer_norm = LayerNorm(self.embed_dim)
        self.need_attn = True
        self.checkpoint_activations = args.checkpoint_activations
        # pick a fused MLP kernel matching the activation, when available
        self.activation_fn_name = args.activation_fn
        self.fused = False
        self.fused_function = None
        if self.activation_fn_name == 'relu':
            from onmt.modules.mlp.mlp import mlp_relu_function
            if mlp_relu_function is not None:
                self.fused_function = mlp_relu_function
                self.fused = True
        elif self.activation_fn_name == 'gelu':
            from onmt.modules.mlp.mlp import mlp_gelu_function
            if mlp_gelu_function is not None:
                self.fused_function = mlp_gelu_function
                self.fused = True
        from onmt.modules.optimized.flash_mha import flash_bert_mha
        self.fast_bert_mha = flash_bert_mha

    # TODO: add incremental states
    def forward(
        self,
        x,
        encoder_out: Optional[torch.Tensor] = None,
        encoder_padding_mask: Optional[torch.Tensor] = None,
        self_attn_mask: Optional[torch.Tensor] = None,
        self_attn_padding_mask: Optional[torch.Tensor] = None,
        need_attn: bool = False,
        need_head_weights: bool = False,
        checkpointing_ffn=False,
        checkpointing_self_attn=False,
        checkpointing_cross_attn=False,
        incremental=False, incremental_cache=None,
        max_len=None, cu_seqlens=None,
        max_len_kv=None, cu_seqlens_kv=None,
        **kwargs
    ):
        """Run one DeltaLM layer: self-attn, FFN-1, cross-attn, FFN-2,
        each with (pre- or post-) LayerNorm and a dropout+residual add.

        Returns (x, attn, incremental_cache).
        """
        if incremental and incremental_cache is None:
            incremental_cache = dict()
        if need_attn:
            pass
        if need_head_weights:
            need_attn = True
        ############################################### self-attention
        residual = x
        if self.normalize_before:
            x = self.self_attn_layer_norm(x)
        x, attn, _ = self.self_attn(
            hidden_states=x,
            attention_mask=self_attn_mask,
            output_attentions=False,
            checkpointing=checkpointing_self_attn,
            cu_seqlens = cu_seqlens, max_len = max_len,
            incremental=incremental, incremental_cache=incremental_cache
        )
        x = dropout_residual_connection(x, residual, self.dropout_module, self.training)
        if not self.normalize_before:
            x = self.self_attn_layer_norm(x)
        ############################################### first FFN (fc3/fc4)
        residual = x
        if self.normalize_before:
            x = self.ffn_layer_norm(x)
        if self.fused and x.is_cuda:
            # fused MLP path (dropout rate 0 at eval time)
            dropout_p = self.activation_dropout_module.p if self.training else 0.0
            weights = [self.fc3.weight, self.fc4.weight]
            biases = [self.fc3.bias, self.fc4.bias]
            x = self.fused_function(dropout_p, checkpointing_ffn, x, *weights, *biases)
        else:
            x = self.activation_fn(self.fc3(x))
            x = self.activation_dropout_module(x)
            x = self.fc4(x)
        x = dropout_residual_connection(x, residual, self.dropout_module, self.training)
        if not self.normalize_before:
            x = self.ffn_layer_norm(x)
        ############################################### cross-attention
        if self.encoder_attn is not None and encoder_out is not None:
            residual = x
            if self.normalize_before:
                x = self.encoder_attn_layer_norm(x)
            x, attn, _ = self.encoder_attn(
                hidden_states=x,
                key_value_states=encoder_out,
                attention_mask=encoder_padding_mask,
                output_attentions=False,
                checkpointing=checkpointing_cross_attn,
                cu_seqlens=cu_seqlens, max_len=max_len,
                cu_seqlens_kv=cu_seqlens_kv, max_len_kv=max_len_kv,
                incremental=incremental, incremental_cache=incremental_cache
            )
            x = dropout_residual_connection(x, residual, self.dropout_module, self.training)
            if not self.normalize_before:
                x = self.encoder_attn_layer_norm(x)
        ############################################### second FFN (fc1/fc2)
        residual = x
        if self.normalize_before:
            x = self.final_layer_norm(x)
        if self.fused and x.is_cuda:
            dropout_p = self.activation_dropout_module.p if self.training else 0.0
            weights = [self.fc1.weight, self.fc2.weight]
            biases = [self.fc1.bias, self.fc2.bias]
            x = self.fused_function(dropout_p, checkpointing_ffn, x, *weights, *biases)
        else:
            x = self.activation_fn(self.fc1(x))
            x = self.activation_dropout_module(x)
            x = self.fc2(x)
        x = dropout_residual_connection(x, residual, self.dropout_module, self.training)
        if not self.normalize_before:
            x = self.final_layer_norm(x)
        return x, attn, incremental_cache
class OmniDeltaLMDecoderLayer(DeltaLMDecoderLayer):
    """Variant of :class:`DeltaLMDecoderLayer` for the "all-attention" setup.

    NOTE(review): the dropout+residual connection after self-attention is
    commented out below (x becomes the raw attention output) — confirm this
    is intentional for this variant.
    """

    def forward(
        self,
        x,
        encoder_out: Optional[torch.Tensor] = None,
        encoder_padding_mask: Optional[torch.Tensor] = None,
        self_attn_mask: Optional[torch.Tensor] = None,
        self_attn_padding_mask: Optional[torch.Tensor] = None,
        need_attn: bool = False,
        need_head_weights: bool = False,
        checkpointing_ffn=False,
        checkpointing_self_attn=False,
        checkpointing_cross_attn=False,
        stack=None,
        **kwargs
    ):
        """
        Args:
            x: [T x B x D]
            encoder_out: [T x B x D]
            encoder_padding_mask: [B x T]
            self_attn_mask: [B x T] or [T x T]?
            self_attn_padding_mask: [B x T]
            need_attn:
            need_head_weights:
            checkpointing_ffn:
            checkpointing_self_attn:
            checkpointing_cross_attn:
            stack: a list of previously used inputs (used for all-attention)
            **kwargs:
        Returns:
        """
        if need_head_weights:
            need_attn = True
        ############################################### self-attention
        residual = x
        # should we need layer norm anymore? (probably)
        if self.normalize_before:
            x = self.self_attn_layer_norm(x)
        x, attn, _ = self.self_attn(
            hidden_states=x,
            attention_mask=self_attn_mask,
            output_attentions=False,
            checkpointing=checkpointing_self_attn
        )
        # residual add intentionally disabled here (see class NOTE)
        # x = self.dropout_module(x)
        # x = self.residual_connection(x, residual)
        # x = dropout_residual_connection(x, residual, self.dropout_module, self.training)
        if not self.normalize_before:
            x = self.self_attn_layer_norm(x)
        ############################################### first FFN (fc3/fc4)
        residual = x
        if self.normalize_before:
            x = self.ffn_layer_norm(x)
        if self.fused and x.is_cuda:
            dropout_p = self.activation_dropout_module.p if self.training else 0.0
            weights = [self.fc3.weight, self.fc4.weight]
            biases = [self.fc3.bias, self.fc4.bias]
            x = self.fused_function(dropout_p, checkpointing_ffn, x, *weights, *biases)
        else:
            x = self.activation_fn(self.fc3(x))
            x = self.activation_dropout_module(x)
            x = self.fc4(x)
        # x = self.dropout_module(x)
        # x = self.residual_connection(x, residual)
        x = dropout_residual_connection(x, residual, self.dropout_module, self.training)
        if not self.normalize_before:
            x = self.ffn_layer_norm(x)
        ############################################### cross-attention
        if self.encoder_attn is not None and encoder_out is not None:
            residual = x
            if self.normalize_before:
                x = self.encoder_attn_layer_norm(x)
            x, attn, _ = self.encoder_attn(
                hidden_states=x,
                key_value_states=encoder_out,
                attention_mask=encoder_padding_mask,
                output_attentions=False,
                checkpointing=checkpointing_cross_attn
            )
            # x = self.dropout_module(x)
            # x = self.residual_connection(x, residual)
            x = dropout_residual_connection(x, residual, self.dropout_module, self.training)
            if not self.normalize_before:
                x = self.encoder_attn_layer_norm(x)
        ############################################### second FFN (fc1/fc2)
        residual = x
        if self.normalize_before:
            x = self.final_layer_norm(x)
        if self.fused and x.is_cuda:
            dropout_p = self.activation_dropout_module.p if self.training else 0.0
            weights = [self.fc1.weight, self.fc2.weight]
            biases = [self.fc1.bias, self.fc2.bias]
            x = self.fused_function(dropout_p, checkpointing_ffn, x, *weights, *biases)
        else:
            x = self.activation_fn(self.fc1(x))
            x = self.activation_dropout_module(x)
            x = self.fc2(x)
        # x = self.dropout_module(x)
        # x = self.residual_connection(x, residual)
        x = dropout_residual_connection(x, residual, self.dropout_module, self.training)
        if not self.normalize_before:
            x = self.final_layer_norm(x)
        return x, attn, None
| 17,669 | 34.841785 | 103 | py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
# from fairseq import utils
# from fairseq.incremental_decoding_utils import with_incremental_state
# from fairseq.modules.fairseq_dropout import FairseqDropout
# from fairseq.modules.quant_noise import quant_noise
from torch import Tensor, nn
from torch.nn import Parameter
class MultiHeadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
    def __init__(
        self,
        embed_dim,
        num_heads,
        kdim=None,
        vdim=None,
        dropout=0.0,
        bias=True,
        add_bias_kv=False,
        add_zero_attn=False,
        self_attention=False,
        encoder_decoder_attention=False,
        q_noise=0.0,
        qn_block_size=8,
    ):
        """Build the projections for multi-head attention.

        Args:
            embed_dim: model (query/output) dimension.
            num_heads: number of attention heads; must divide embed_dim.
            kdim / vdim: key/value input dims (default to embed_dim).
            dropout: attention dropout probability.
            bias: whether the q/k/v/out projections carry a bias.
            add_bias_kv: append learned bias vectors to keys and values.
            add_zero_attn: append a zero vector to keys/values at forward.
            self_attention / encoder_decoder_attention: mode flags;
                self-attention requires kdim == vdim == embed_dim.
            q_noise, qn_block_size: accepted for fairseq interface
                compatibility; unused in this implementation.
        """
        super().__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
        self.num_heads = num_heads
        self.dropout_module = nn.Dropout(dropout)
        self.head_dim = embed_dim // num_heads
        assert (
            self.head_dim * num_heads == self.embed_dim
        ), "embed_dim must be divisible by num_heads"
        # 1/sqrt(head_dim) scaling applied to queries
        self.scaling = self.head_dim ** -0.5
        self.self_attention = self_attention
        self.encoder_decoder_attention = encoder_decoder_attention
        assert not self.self_attention or self.qkv_same_dim, (
            "Self-attention requires query, key and " "value to be of the same size"
        )
        self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        if add_bias_kv:
            # uninitialized here; initialized in reset_parameters()
            self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
            self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None
        self.add_zero_attn = add_zero_attn
        self.reset_parameters()
    def reset_parameters(self):
        """(Re-)initialize projection weights and optional k/v biases."""
        if self.qkv_same_dim:
            # Empirically observed the convergence to be much better with
            # the scaled initialization
            nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
            nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
            nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
        else:
            nn.init.xavier_uniform_(self.k_proj.weight)
            nn.init.xavier_uniform_(self.v_proj.weight)
            nn.init.xavier_uniform_(self.q_proj.weight)
        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.out_proj.bias is not None:
            nn.init.constant_(self.out_proj.bias, 0.0)
        if self.bias_k is not None:
            nn.init.xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            nn.init.xavier_normal_(self.bias_v)
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = True,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
averaged over heads (default: False).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
is_tpu = query.device.type == "xla"
tgt_len, bsz, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}"
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if key is not None:
src_len, key_bsz, _ = key.size()
if not torch.jit.is_scripting():
assert key_bsz == bsz
assert value is not None
assert src_len, bsz == value.shape[:2]
return F.multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
torch.empty([0]),
torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout_module.p,
self.out_proj.weight,
self.out_proj.bias,
self.training,
key_padding_mask,
need_weights,
attn_mask,
use_separate_proj_weight=True,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
)
# if (
# not is_tpu # don't use PyTorch version on TPUs
# and incremental_state is None
# and not static_kv
# # A workaround for quantization to work. Otherwise JIT compilation
# # treats bias in linear module as method.
# and not torch.jit.is_scripting()
# ):
# assert key is not None and value is not None
#
#
# if incremental_state is not None:
# saved_state = self._get_input_buffer(incremental_state)
# if saved_state is not None and "prev_key" in saved_state:
# # previous time steps are cached - no need to recompute
# # key and value if they are static
# if static_kv:
# assert self.encoder_decoder_attention and not self.self_attention
# key = value = None
# else:
# saved_state = None
#
# if self.self_attention:
# q = self.q_proj(query)
# k = self.k_proj(query)
# v = self.v_proj(query)
# elif self.encoder_decoder_attention:
# # encoder-decoder attention
# q = self.q_proj(query)
# if key is None:
# assert value is None
# k = v = None
# else:
# k = self.k_proj(key)
# v = self.v_proj(key)
#
# else:
# assert key is not None and value is not None
# q = self.q_proj(query)
# k = self.k_proj(key)
# v = self.v_proj(value)
# q *= self.scaling
#
# if self.bias_k is not None:
# assert self.bias_v is not None
# k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
# v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
# if attn_mask is not None:
# attn_mask = torch.cat(
# [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
# )
# if key_padding_mask is not None:
# key_padding_mask = torch.cat(
# [
# key_padding_mask,
# key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
# ],
# dim=1,
# )
#
# q = (
# q.contiguous()
# .view(tgt_len, bsz * self.num_heads, self.head_dim)
# .transpose(0, 1)
# )
# if k is not None:
# k = (
# k.contiguous()
# .view(-1, bsz * self.num_heads, self.head_dim)
# .transpose(0, 1)
# )
# if v is not None:
# v = (
# v.contiguous()
# .view(-1, bsz * self.num_heads, self.head_dim)
# .transpose(0, 1)
# )
#
# if saved_state is not None:
# # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
# if "prev_key" in saved_state:
# _prev_key = saved_state["prev_key"]
# assert _prev_key is not None
# prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
# if static_kv:
# k = prev_key
# else:
# assert k is not None
# k = torch.cat([prev_key, k], dim=1)
# src_len = k.size(1)
# if "prev_value" in saved_state:
# _prev_value = saved_state["prev_value"]
# assert _prev_value is not None
# prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
# if static_kv:
# v = prev_value
# else:
# assert v is not None
# v = torch.cat([prev_value, v], dim=1)
# prev_key_padding_mask: Optional[Tensor] = None
# if "prev_key_padding_mask" in saved_state:
# prev_key_padding_mask = saved_state["prev_key_padding_mask"]
# assert k is not None and v is not None
# key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
# key_padding_mask=key_padding_mask,
# prev_key_padding_mask=prev_key_padding_mask,
# batch_size=bsz,
# src_len=k.size(1),
# static_kv=static_kv,
# )
#
# saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
# saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
# saved_state["prev_key_padding_mask"] = key_padding_mask
# # In this branch incremental_state is never None
# assert incremental_state is not None
# incremental_state = self._set_input_buffer(incremental_state, saved_state)
# assert k is not None
# assert k.size(1) == src_len
#
# # This is part of a workaround to get around fork/join parallelism
# # not supporting Optional types.
# if key_padding_mask is not None and key_padding_mask.dim() == 0:
# key_padding_mask = None
#
# if key_padding_mask is not None:
# assert key_padding_mask.size(0) == bsz
# assert key_padding_mask.size(1) == src_len
#
# if self.add_zero_attn:
# assert v is not None
# src_len += 1
# k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
# v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
# if attn_mask is not None:
# attn_mask = torch.cat(
# [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
# )
# if key_padding_mask is not None:
# key_padding_mask = torch.cat(
# [
# key_padding_mask,
# torch.zeros(key_padding_mask.size(0), 1).type_as(
# key_padding_mask
# ),
# ],
# dim=1,
# )
#
# attn_weights = torch.bmm(q, k.transpose(1, 2))
# attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
#
# assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
#
# if attn_mask is not None:
# attn_mask = attn_mask.unsqueeze(0)
# if self.onnx_trace:
# attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
# attn_weights += attn_mask
#
# if key_padding_mask is not None:
# # don't attend to padding symbols
# attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
# if not is_tpu:
# attn_weights = attn_weights.masked_fill(
# key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
# float("-inf"),
# )
# else:
# attn_weights = attn_weights.transpose(0, 2)
# attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf"))
# attn_weights = attn_weights.transpose(0, 2)
# attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
#
# if before_softmax:
# return attn_weights, v
#
# attn_weights_float = utils.softmax(
# attn_weights, dim=-1, onnx_trace=self.onnx_trace
# )
# attn_weights = attn_weights_float.type_as(attn_weights)
# attn_probs = self.dropout_module(attn_weights)
#
# assert v is not None
# attn = torch.bmm(attn_probs, v)
# assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
# if self.onnx_trace and attn.size(1) == 1:
# # when ONNX tracing a single decoder step (sequence length == 1)
# # the transpose is a no-op copy before view, thus unnecessary
# attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
# else:
# attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
# attn = self.out_proj(attn)
# attn_weights: Optional[Tensor] = None
# if need_weights:
# attn_weights = attn_weights_float.view(
# bsz, self.num_heads, tgt_len, src_len
# ).transpose(1, 0)
# if not need_head_weights:
# # average attention weights over heads
# attn_weights = attn_weights.mean(dim=0)
#
# return attn, attn_weights
#
# @staticmethod
# def _append_prev_key_padding_mask(
# key_padding_mask: Optional[Tensor],
# prev_key_padding_mask: Optional[Tensor],
# batch_size: int,
# src_len: int,
# static_kv: bool,
# ) -> Optional[Tensor]:
# # saved key padding masks have shape (bsz, seq_len)
# if prev_key_padding_mask is not None and static_kv:
# new_key_padding_mask = prev_key_padding_mask
# elif prev_key_padding_mask is not None and key_padding_mask is not None:
# new_key_padding_mask = torch.cat(
# [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
# )
# # During incremental decoding, as the padding token enters and
# # leaves the frame, there will be a time when prev or current
# # is None
# elif prev_key_padding_mask is not None:
# if src_len > prev_key_padding_mask.size(1):
# filler = torch.zeros(
# (batch_size, src_len - prev_key_padding_mask.size(1)),
# device=prev_key_padding_mask.device,
# )
# new_key_padding_mask = torch.cat(
# [prev_key_padding_mask.float(), filler.float()], dim=1
# )
# else:
# new_key_padding_mask = prev_key_padding_mask.float()
# elif key_padding_mask is not None:
# if src_len > key_padding_mask.size(1):
# filler = torch.zeros(
# (batch_size, src_len - key_padding_mask.size(1)),
# device=key_padding_mask.device,
# )
# new_key_padding_mask = torch.cat(
# [filler.float(), key_padding_mask.float()], dim=1
# )
# else:
# new_key_padding_mask = key_padding_mask.float()
# else:
# new_key_padding_mask = prev_key_padding_mask
# return new_key_padding_mask
#
# @torch.jit.export
# def reorder_incremental_state(
# self,
# incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
# new_order: Tensor,
# ):
# """Reorder buffered internal state (for incremental generation)."""
# input_buffer = self._get_input_buffer(incremental_state)
# if input_buffer is not None:
# for k in input_buffer.keys():
# input_buffer_k = input_buffer[k]
# if input_buffer_k is not None:
# if self.encoder_decoder_attention and input_buffer_k.size(
# 0
# ) == new_order.size(0):
# break
# input_buffer[k] = input_buffer_k.index_select(0, new_order)
# incremental_state = self._set_input_buffer(incremental_state, input_buffer)
# return incremental_state
#
# def _get_input_buffer(
# self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
# ) -> Dict[str, Optional[Tensor]]:
# result = self.get_incremental_state(incremental_state, "attn_state")
# if result is not None:
# return result
# else:
# empty_result: Dict[str, Optional[Tensor]] = {}
# return empty_result
#
# def _set_input_buffer(
# self,
# incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
# buffer: Dict[str, Optional[Tensor]],
# ):
# return self.set_incremental_state(incremental_state, "attn_state", buffer)
#
# def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int):
# return attn_weights
    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade old checkpoints in place: split the legacy fused
        ``in_proj_weight``/``in_proj_bias`` (stacked q + k + v) into the
        separate ``q_proj``/``k_proj``/``v_proj`` entries this module uses.

        Args:
            state_dict: checkpoint state dict, modified in place.
            name: module prefix of this attention layer inside ``state_dict``.
        """
        prefix = name + "." if name != "" else ""
        items_to_add = {}
        keys_to_remove = []
        for k in state_dict.keys():
            if k.endswith(prefix + "in_proj_weight"):
                # in_proj_weight used to be q + k + v with same dimensions
                dim = int(state_dict[k].shape[0] / 3)
                items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim]
                items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim]
                items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :]
                keys_to_remove.append(k)
                k_bias = prefix + "in_proj_bias"
                if k_bias in state_dict.keys():
                    # Same split for the fused bias, if present.
                    dim = int(state_dict[k].shape[0] / 3)
                    items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim]
                    items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][
                        dim : 2 * dim
                    ]
                    items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :]
                    keys_to_remove.append(prefix + "in_proj_bias")
        # Remove the fused keys only after iteration to avoid mutating the
        # dict while looping over it.
        for k in keys_to_remove:
            del state_dict[k]
        for key, value in items_to_add.items():
state_dict[key] = value | 19,975 | 40.272727 | 90 | py |
NMTGMinor | NMTGMinor-master/onmt/models/deltalm/modules/efficient_adapters.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import get_activation_fn
from onmt.modules.layer_norm import layer_norm_func
from onmt.modules.optimized.linear import Linear as LinearModule
def Linear(in_features, out_features, bias=True):
    """Build a project ``LinearModule`` with xavier-uniform weight and zero bias."""
    layer = LinearModule(in_features, out_features, bias)
    nn.init.xavier_uniform_(layer.weight)
    if bias:
        nn.init.constant_(layer.bias, 0.0)
    return layer
class EfficientAdapter(nn.Module):
    """Adapter layers (Bapna & Firat 2019, https://aclanthology.org/D19-1165/)
    stored as rows of shared projection matrices.

    All ``num_modules`` adapters share one big "down" and one big "up" matrix;
    the forward pass indexes the corresponding row. This is a workaround for an
    efficiency bug that occurs in distributed training.
    """

    def __init__(
        self,
        num_modules: int,
        input_size: int,
        bottleneck_size: int,
        activation_fn: str = "relu",
        static_layernorm: bool = False,
    ):
        """
        Args:
            num_modules (int): number of independent adapters stored as rows
            input_size (int): the dimensionality of the input feature vector
            bottleneck_size (int): the dimensionality of the bottleneck vector
            activation_fn (str): the activation function used after the down-projection
            static_layernorm (bool): use LayerNorm without trainable parameters
        """
        super().__init__()
        # reuse the transformer Linear layer to have consistent init with the rest of the model
        self.num_modules = num_modules
        self.static_layernorm = static_layernorm
        # Row i of each "weight" below holds the flattened parameters of adapter i.
        self.down_weight = Linear(bottleneck_size * input_size, num_modules, bias=False)
        self.down_bias = Linear(bottleneck_size, num_modules, bias=False)
        self.up_weight = Linear(bottleneck_size * input_size, num_modules, bias=False)
        self.up_bias = Linear(input_size, num_modules, bias=False)
        if not self.static_layernorm:
            self.layer_norm_gammas = Linear(input_size, num_modules)
            self.layer_norm_betas = Linear(input_size, num_modules)
        self.activation = get_activation_fn(activation_fn)
        # ensure normal initialization
        # initialize the parameters of each "adapter" row similar to nn.Linear()
        with torch.no_grad():
            for i in range(num_modules):
                self.down_weight.weight[i] = Linear(input_size, bottleneck_size).weight.view(-1)
                self.up_weight.weight[i] = Linear(bottleneck_size, input_size).weight.view(-1)
                self.down_bias.weight[i].fill_(0)
                # BUG FIX: this previously zeroed `up_weight` — immediately
                # undoing the xavier init two lines above — and left `up_bias`
                # at its random default. Zero the *bias* row instead,
                # mirroring `down_bias`.
                self.up_bias.weight[i].fill_(0)
                if not self.static_layernorm:
                    # gamma rows start at 1 (identity scale), beta rows at 0
                    self.layer_norm_gammas.weight[i].fill_(1)
                    self.layer_norm_betas.weight[i].fill_(0)

        # Tag every parameter so optimizers / freezing logic can find adapters.
        for n, p in self.named_parameters():
            p.adapter = True
            p.label = n

        # Fused MLP config: use the fused CUDA kernel when available.
        self.fused = False
        self.fused_function = None
        if activation_fn == 'relu':
            from onmt.modules.mlp.mlp import mlp_relu_function
            if mlp_relu_function is not None:
                self.fused_function = mlp_relu_function
                self.fused = True
        elif activation_fn == 'gelu':
            from onmt.modules.mlp.mlp import mlp_gelu_function
            if mlp_gelu_function is not None:
                self.fused_function = mlp_gelu_function
                self.fused = True

    def forward(self, x: torch.Tensor, index: int):
        """Apply adapter `index` to `x` (LayerNorm -> down -> act -> up -> +residual)."""
        shortcut = x

        # Select the row of parameters belonging to this adapter.
        down_w = self.down_weight.weight[index]
        up_w = self.up_weight.weight[index]
        down_b = self.down_bias.weight[index]
        up_b = self.up_bias.weight[index]
        ln_g = None
        ln_b = None
        if not self.static_layernorm:
            # ensure ln_g will have mean of 1, instead of 0
            ln_g = self.layer_norm_gammas.weight[index]
            ln_b = self.layer_norm_betas.weight[index]

        x = layer_norm_func(x, ln_g, ln_b, (shortcut.size(-1),))

        if self.fused and x.is_cuda:
            # NOTE: dropout is fixed at 0 on the fused path.
            dropout_p = 0.0
            weights = [down_w, up_w]
            biases = [down_b, up_b]
            x = self.fused_function(dropout_p, False, x, *weights, *biases)
        else:
            x = F.linear(x, down_w.view(-1, shortcut.size(-1)), down_b)
            x = self.activation(x)
            x = F.linear(x, up_w.view(shortcut.size(-1), -1), up_b)

        return x + shortcut
| 4,610 | 38.75 | 96 | py |
NMTGMinor | NMTGMinor-master/onmt/models/deltalm/modules/positional_embeddings.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, Optional, Any
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
def make_positions(tensor, padding_idx: int):
    """Replace non-padding symbols with their position numbers.

    Position numbers begin at padding_idx+1. Padding symbols are ignored
    (they keep the value ``padding_idx``).
    """
    # The series of casts and type-conversions here are carefully
    # balanced to both work with ONNX export and XLA. In particular XLA
    # prefers ints, cumsum defaults to output longs, and ONNX doesn't know
    # how to handle the dtype kwarg in cumsum.
    pad_mask = tensor.ne(padding_idx).int()
    positions = torch.cumsum(pad_mask, dim=1).type_as(pad_mask) * pad_mask
    return positions.long() + padding_idx
class LearnedPositionalEmbedding(nn.Embedding):
    """
    This module learns positional embeddings up to a fixed maximum size.
    Padding ids are ignored by either offsetting based on padding_idx
    or by setting padding_idx to None and ensuring that the appropriate
    position ids are passed to the forward function.
    """
    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int):
        super().__init__(num_embeddings, embedding_dim, padding_idx)
        # Rows 0..padding_idx are reserved, so the usable range shrinks
        # accordingly when a padding index is set.
        if self.padding_idx is not None:
            self.max_positions = self.num_embeddings - self.padding_idx - 1
        else:
            self.max_positions = self.num_embeddings
    def forward(
        self,
        input: Tensor,
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
        positions: Optional[Tensor] = None,
    ):
        """Input is expected to be of size [bsz x seqlen].

        Returns the looked-up position embeddings of shape
        [bsz x seqlen x embedding_dim] (or [1 x 1 x dim] when decoding
        incrementally, since all tokens in a step share one position).
        """
        assert (positions is None) or (
            self.padding_idx is None
        ), "If positions is pre-computed then padding_idx should not be set."
        if positions is None:
            if incremental_state is not None:
                # positions is the same for every token when decoding a single step
                # Without the int() cast, it doesn't work in some cases when exporting to ONNX
                positions = torch.zeros(
                    (1, 1), device=input.device, dtype=input.dtype
                ).fill_(int(self.padding_idx + input.size(1)))
            else:
                positions = make_positions(
                    input, self.padding_idx,
                )
                # Clamp so sequences longer than the learned table do not
                # index out of range (they reuse the last position row).
                max_position = self.max_positions - 1
                positions = torch.clamp(positions, 0, max_position)
        return F.embedding(
            positions,
            self.weight,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )
class SinusoidalPositionalEmbedding(nn.Module):
    """This module produces sinusoidal positional embeddings of any length.
    Padding symbols are ignored. The embedding table is built lazily and
    expanded on demand when a longer sequence is seen.
    """
    def __init__(self, embedding_dim, padding_idx, init_size=1024):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx if padding_idx is not None else 0
        self.weights = SinusoidalPositionalEmbedding.get_embedding(
            init_size, embedding_dim, padding_idx
        )
        # Buffer used only to track the module's device/dtype, so the lazily
        # (re)built `weights` table can be moved to match.
        self.register_buffer("_float_tensor", torch.FloatTensor(1))
        self.max_positions = int(1e5)
    @staticmethod
    def get_embedding(
        num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None
    ):
        """Build sinusoidal embeddings.
        This matches the implementation in tensor2tensor, but differs slightly
        from the description in Section 3.5 of "Attention Is All You Need".
        Returns a tensor of shape [num_embeddings x embedding_dim]; the
        padding row (if any) is zeroed.
        """
        half_dim = embedding_dim // 2
        # Geometric progression of wavelengths, from 1 to 10000.
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
        emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(
            1
        ) * emb.unsqueeze(0)
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(
            num_embeddings, -1
        )
        if embedding_dim % 2 == 1:
            # zero pad
            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            emb[padding_idx, :] = 0
        return emb
    def forward(
        self,
        input,
        incremental_state: Optional[Any] = None,
        timestep: Optional[Tensor] = None,
        positions: Optional[Any] = None,
    ):
        """Input is expected to be of size [bsz x seqlen]."""
        # shape_as_tensor keeps the graph ONNX-exportable.
        bspair = torch.onnx.operators.shape_as_tensor(input)
        bsz, seq_len = bspair[0], bspair[1]
        max_pos = self.padding_idx + 1 + seq_len
        if self.weights is None or max_pos > self.weights.size(0):
            # recompute/expand embeddings if needed
            self.weights = SinusoidalPositionalEmbedding.get_embedding(
                max_pos, self.embedding_dim, self.padding_idx
            )
        self.weights = self.weights.to(self._float_tensor)
        if incremental_state is not None:
            # positions is the same for every token when decoding a single step
            pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
            return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
        positions = make_positions(
            input, self.padding_idx
        )
        # Detach: sinusoidal embeddings are fixed, never trained.
        return (
            self.weights.index_select(0, positions.view(-1))
            .view(bsz, seq_len, -1)
            .detach()
        )
def PositionalEmbedding(
    num_embeddings: int,
    embedding_dim: int,
    padding_idx: int,
    learned: bool = False,
):
    """Factory: build a learned or sinusoidal positional embedding.

    NOTE(review): both branches compute ``num_embeddings + padding_idx + 1``,
    so passing ``padding_idx=None`` raises TypeError here — confirm callers
    always pass an int.
    """
    if learned:
        # if padding_idx is specified then offset the embedding ids by
        # this index and adjust num_embeddings appropriately
        # TODO: The right place for this offset would be inside
        # LearnedPositionalEmbedding. Move this there for a cleaner implementation.
        if padding_idx is not None:
            num_embeddings = num_embeddings + padding_idx + 1
        m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
        nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
        if padding_idx is not None:
            nn.init.constant_(m.weight[padding_idx], 0)
    else:
        m = SinusoidalPositionalEmbedding(
            embedding_dim,
            padding_idx,
            init_size=num_embeddings + padding_idx + 1,
        )
return m | 6,620 | 36.619318 | 94 | py |
NMTGMinor | NMTGMinor-master/onmt/models/deltalm/modules/utils.py | try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import contextlib
import itertools
import logging
import re
import warnings
from typing import Optional, Tuple, Callable, Dict, List, TYPE_CHECKING
import numpy as np
import torch
def gelu_accurate(x):
    """Tanh approximation of GELU:
    0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).
    """
    # BUG FIX: `math` is not imported at the top of this module, so the
    # reference below raised NameError on the first call.
    import math
    if not hasattr(gelu_accurate, "_a"):
        # cache sqrt(2/pi) on the function object so it is computed only once
        gelu_accurate._a = math.sqrt(2 / math.pi)
    return (
        0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3))))
    )
def gelu(x: torch.Tensor) -> torch.Tensor:
    """Exact (erf-based) GELU, computed in float32 for numerical stability,
    then cast back to the input dtype."""
    out32 = torch.nn.functional.gelu(x.float())
    return out32.type_as(x)
import torch.nn.functional as F
def get_activation_fn(activation: str) -> Callable:
    """Returns the activation function corresponding to `activation`.

    Raises:
        RuntimeError: if `activation` is not one of the supported names
            (see :func:`get_available_activation_fns`).
    """
    if activation == "relu":
        return F.relu
    elif activation == "gelu":
        return gelu
    elif activation == "gelu_fast":
        # BUG FIX: `deprecation_warning` is not defined or imported in this
        # module and raised NameError; use the stdlib warning instead.
        warnings.warn(
            "--activation-fn=gelu_fast has been renamed to gelu_accurate",
            DeprecationWarning,
        )
        return gelu_accurate
    elif activation == "gelu_accurate":
        return gelu_accurate
    elif activation == "tanh":
        return torch.tanh
    elif activation == "linear":
        # identity
        return lambda x: x
    else:
        raise RuntimeError("--activation-fn {} not supported".format(activation))
def get_available_activation_fns() -> List:
    """Return the list of names accepted by ``get_activation_fn``."""
    return [
        "relu",
        "gelu",
        "gelu_fast",  # deprecated
        "gelu_accurate",
        "tanh",
        "linear",
] | 1,520 | 24.779661 | 91 | py |
NMTGMinor | NMTGMinor-master/onmt/models/deltalm/modules/transformer_layer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional
import torch
import torch.nn as nn
# from fairseq import utils
# from onmt.models.speech_recognizer.fairseq_wav2vec2.fairseq_modules import MultiheadAttention
from onmt.modules.layer_norm import LayerNorm
# from fairseq.modules import LayerNorm, MultiheadAttention
# from fairseq.modules.fairseq_dropout import FairseqDropout
# from fairseq.modules.quant_noise import quant_noise
from torch import Tensor
from .utils import get_activation_fn
from .multihead_attention import MultiHeadAttention
def dropout_residual_connection(x, residual, dropout_module, is_training):
    # Thin wrapper over the TorchScript-compiled dropout + residual-add;
    # only `dropout_module.p` is read from the module.
    return dropout_add_jit(x, residual, dropout_module.p, is_training)
@torch.jit.script
def dropout_add_jit(
    x: torch.Tensor, residual: torch.Tensor, prob: float, is_training: bool
) -> torch.Tensor:
    """Apply dropout to `x` with probability `prob`, then add the residual.
    Compiled with TorchScript so the two ops can be fused."""
    dropped = torch.nn.functional.dropout(x, p=prob, training=is_training)
    return residual + dropped
def linear_act_linear(x, fc1, fc2, prob, is_training, activation_func):
    """Position-wise feed-forward: fc1 -> activation -> dropout -> fc2."""
    hidden = activation_func(fc1(x))
    hidden = torch.nn.functional.dropout(hidden, p=prob, training=is_training)
    return fc2(hidden)
class TransformerEncoderLayerBase(nn.Module):
    """Encoder layer block.
    In the original paper each operation (multi-head attention or FFN) is
    postprocessed with: `dropout -> add residual -> layernorm`. In the
    tensor2tensor code they suggest that learning is more robust when
    preprocessing each layer with layernorm and postprocessing with:
    `dropout -> add residual`. We default to the approach in the paper, but the
    tensor2tensor approach can be enabled by setting
    *cfg.encoder_normalize_before* to ``True``.
    Args:
        cfg: configuration namespace with the `encoder_*`, dropout and
            activation attributes read below.
    """
    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg
        self.embed_dim = cfg.encoder_embed_dim
        # self.quant_noise = cfg.quant_noise.pq
        # self.quant_noise_block_size = cfg.quant_noise.pq_block_size
        self.self_attn = self.build_self_attention(self.embed_dim, cfg)
        self.self_attn_layer_norm = LayerNorm(self.embed_dim)
        self.dropout_module = nn.Dropout(cfg.dropout)
        self.activation_fn = get_activation_fn(activation=cfg.activation_fn)
        activation_dropout_p = cfg.activation_dropout
        if activation_dropout_p == 0:
            # for backwards compatibility with models that use cfg.relu_dropout
            activation_dropout_p = cfg.relu_dropout or 0
        self.activation_dropout_module = nn.Dropout(activation_dropout_p)
        self.normalize_before = cfg.encoder_normalize_before
        self.fc1 = self.build_fc1(
            self.embed_dim,
            cfg.encoder_ffn_embed_dim
        )
        self.fc2 = self.build_fc2(
            cfg.encoder_ffn_embed_dim,
            self.embed_dim
        )
        self.checkpoint_activations = cfg.checkpoint_activations
        self.final_layer_norm = LayerNorm(self.embed_dim)
        self.activation_fn_name = cfg.activation_fn
        # Fused MLP config: prefer the fused CUDA MLP kernel when it is
        # importable; forward() falls back to fc1/act/fc2 otherwise.
        self.fused = False
        self.fused_function = None
        if self.activation_fn_name == 'relu':
            from onmt.modules.mlp.mlp import mlp_relu_function
            if mlp_relu_function is not None:
                self.fused_function = mlp_relu_function
                self.fused = True
        elif self.activation_fn_name == 'gelu':
            from onmt.modules.mlp.mlp import mlp_gelu_function
            if mlp_gelu_function is not None:
                self.fused_function = mlp_gelu_function
                self.fused = True
        # Adapter config
        self.n_languages = -1
        self.has_adapter = False
    def build_fc1(self, input_dim, output_dim, *args):
        # First FFN projection (d_model -> d_ffn); overridable by subclasses.
        return nn.Linear(input_dim, output_dim)
    def build_fc2(self, input_dim, output_dim, *args):
        # Second FFN projection (d_ffn -> d_model); overridable by subclasses.
        return nn.Linear(input_dim, output_dim)
    def build_self_attention(self, embed_dim, cfg):
        from pretrain_module.modeling_mbart import MBartAttention
        return MBartAttention(
            embed_dim=embed_dim,
            num_heads=cfg.encoder_attention_heads,
            dropout=cfg.attention_dropout,
        )
    def residual_connection(self, x, residual):
        # Plain residual add (no dropout); see dropout_residual_connection
        # for the fused dropout + add used in forward().
        return residual + x
    def upgrade_state_dict_named(self, state_dict, name):
        """
        Rename layer norm states from `...layer_norms.0.weight` to
        `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to
        `...final_layer_norm.weight`
        """
        layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"}
        for old, new in layer_norm_map.items():
            for m in ("weight", "bias"):
                k = "{}.layer_norms.{}.{}".format(name, old, m)
                if k in state_dict:
                    state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k]
                    del state_dict[k]
    def forward(
        self,
        x,
        encoder_padding_mask: Optional[Tensor],
        attn_mask: Optional[Tensor] = None,
        max_len=None, cu_seqlens=None,
        **kwargs
    ):
        """
        Args:
            x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
            encoder_padding_mask (ByteTensor): binary ByteTensor of shape
                `(batch, seq_len)` where padding elements are indicated by ``1``.
            attn_mask (ByteTensor): binary tensor of shape `(tgt_len, src_len)`,
                where `tgt_len` is the length of output and `src_len` is the
                length of input, though here both are equal to `seq_len`.
                `attn_mask[tgt_i, src_j] = 1` means that when calculating the
                embedding for `tgt_i`, we exclude (mask out) `src_j`. This is
                useful for strided self-attention.
            max_len, cu_seqlens: forwarded to the attention module (used by
                variable-length attention kernels).
        Returns:
            encoded output of shape `(seq_len, batch, embed_dim)`
        """
        # anything in original attn_mask = 1, becomes -1e8
        # anything in original attn_mask = 0, becomes 0
        # Note that we cannot use -inf here, because at some edge cases,
        # the attention weight (before softmax) for some padded element in query
        # will become -inf, which results in NaN in model parameters
        # NOTE(review): the converted attn_mask is never passed to self_attn
        # below (only encoder_padding_mask is) — confirm this is intentional.
        if attn_mask is not None:
            attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8)
        residual = x
        if self.normalize_before:
            x = self.self_attn_layer_norm(x)
        x, _, _ = self.self_attn(
            hidden_states=x,
            attention_mask=encoder_padding_mask,
            output_attentions=False,
            max_len=max_len, cu_seqlens=cu_seqlens
        )
        # x = self.dropout_module(x)
        # x = self.residual_connection(x, residual)
        x = dropout_residual_connection(x, residual, self.dropout_module, self.training)
        if not self.normalize_before:
            x = self.self_attn_layer_norm(x)
        residual = x
        if self.normalize_before:
            x = self.final_layer_norm(x)
        # Feed-forward: fused CUDA kernel when available, eager ops otherwise.
        if self.fused and x.is_cuda:
            dropout_p = self.activation_dropout_module.p if self.training else 0.0
            weights = [self.fc1.weight, self.fc2.weight]
            biases = [self.fc1.bias, self.fc2.bias]
            x = self.fused_function(dropout_p, False, x, *weights, *biases)
        else:
            x = self.activation_fn(self.fc1(x))
            x = self.activation_dropout_module(x)
            x = self.fc2(x)
        x = dropout_residual_connection(x, residual, self.dropout_module, self.training)
        if not self.normalize_before:
            x = self.final_layer_norm(x)
        return x
class TransformerDecoderLayerBase(nn.Module):
"""Decoder layer block.
In the original paper each operation (multi-head attention, encoder
attention or FFN) is postprocessed with: `dropout -> add residual ->
layernorm`. In the tensor2tensor code they suggest that learning is more
robust when preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*cfg.decoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self, cfg, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False
):
super().__init__()
self.embed_dim = cfg.decoder_embed_dim
self.dropout_module = nn.Dropout(cfg.dropout)
self.cross_self_attention = cfg.cross_self_attention
self.self_attn = self.build_self_attention(
self.embed_dim,
cfg,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
)
self.activation_fn = get_activation_fn(activation=cfg.activation_fn)
activation_dropout_p = cfg.activation_dropout
if activation_dropout_p == 0:
# for backwards compatibility with models that use cfg.relu_dropout
activation_dropout_p = cfg.relu_dropout or 0
self.activation_dropout_module = nn.Dropout(float(activation_dropout_p))
self.normalize_before = cfg.decoder_normalize_before
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = self.build_encoder_attention(self.embed_dim, cfg)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
self.fc1 = self.build_fc1(
self.embed_dim,
cfg.decoder_ffn_embed_dim
)
self.fc2 = self.build_fc2(
cfg.decoder_ffn_embed_dim,
self.embed_dim
)
self.final_layer_norm = LayerNorm(self.embed_dim)
self.need_attn = True
elf.checkpoint_activations
# self.activation_fn_name = cfg.activation_fn
# self.fused = False
# self.fused_function = None
# if self.activation_fn_name == 'relu':
# from onmt.modules.mlp.mlp import mlp_relu_function
# if mlp_relu_function is not None:
# self.fused_function = mlp_relu_function
# self.fused = True
# elif self.activation_fn_name == 'gelu':
# from onmt.modules.mlp.mlp import mlp_gelu_function
# if mlp_gelu_function is not None:
# self.fused_function = mlp_gelu_function
# self.fused = True
def build_fc1(self, input_dim, output_dim):
return nn.Linear(input_dim, output_dim)
def build_fc2(self, input_dim, output_dim):
return nn.Linear(input_dim, output_dim)
def build_self_attention(
self, embed_dim, cfg, add_bias_kv=False, add_zero_attn=False
):
from pretrain_module.modeling_mbart import MBartAttention
return MBartAttention( # MBartAutoRegressiveSelfAttentionSLow(
embed_dim=embed_dim,
num_heads=cfg.decoder_attention_heads,
dropout=cfg.attention_dropout,
is_decoder=True,
)
# return MultiHeadAttention(
# embed_dim,
# cfg.decoder_attention_heads,
# dropout=cfg.attention_dropout,
# add_bias_kv=add_bias_kv,
# add_zero_attn=add_zero_attn,
# self_attention=not cfg.cross_self_attention
# )
def build_encoder_attention(self, embed_dim, cfg):
from pretrain_module.modeling_mbart import MBartCrossAttention
return MBartCrossAttention(
embed_dim,
cfg.decoder_attention_heads,
dropout=cfg.attention_dropout,
)
# return MultiHeadAttention(
# embed_dim,
# cfg.decoder_attention_heads,
# kdim=cfg.encoder_embed_dim,
# vdim=cfg.encoder_embed_dim,
# dropout=cfg.attention_dropout,
# encoder_decoder_attention=True
# )
def residual_connection(self, x, residual):
return residual + x
    def forward(
        self,
        x,
        encoder_out: Optional[torch.Tensor] = None,
        encoder_padding_mask: Optional[torch.Tensor] = None,
        self_attn_mask: Optional[torch.Tensor] = None,
        self_attn_padding_mask: Optional[torch.Tensor] = None,
        need_attn: bool = False,
        need_head_weights: bool = False,
        **kwargs
    ):
        """
        Run one decoder layer: self-attention, optional encoder-decoder
        (cross) attention, then the position-wise feed-forward block. Each
        sub-block uses a residual connection and layer norm applied before
        or after the sub-block depending on ``self.normalize_before``.

        Args:
            x: input hidden states
            encoder_out: encoder output used as keys/values for cross
                attention; the cross-attention block is skipped when this is
                None or when the layer was built with ``no_encoder_attn``
            encoder_padding_mask: attention mask over encoder positions
            self_attn_mask: (causal) mask for self-attention
            self_attn_padding_mask: padding mask for self-attention
                (accepted but not used by this implementation)
            need_attn (bool): request attention weights
            need_head_weights (bool): request per-head weights (implies
                ``need_attn``)

        Returns:
            tuple ``(x, attn, None)`` with ``x`` the encoded output of shape
            `(seq_len, batch, embed_dim)`.
        """
        if need_head_weights:
            need_attn = True

        # ---- self-attention block (pre/post-norm + residual) ----
        residual = x
        if self.normalize_before:
            x = self.self_attn_layer_norm(x)
        # NOTE(review): output_attentions is hard-coded False, so `attn` is
        # presumably None even when need_attn is set -- confirm against
        # MBartAttention's return contract.
        x, attn, _ = self.self_attn(
            hidden_states=x,
            attention_mask=self_attn_mask,
            output_attentions=False
        )
        x = self.dropout_module(x)
        x = self.residual_connection(x, residual)
        if not self.normalize_before:
            x = self.self_attn_layer_norm(x)

        # ---- cross-attention block (only when encoder output is provided) ----
        if self.encoder_attn is not None and encoder_out is not None:
            residual = x
            if self.normalize_before:
                x = self.encoder_attn_layer_norm(x)
            x, attn, _ = self.encoder_attn(
                hidden_states=x,
                key_value_states=encoder_out,
                attention_mask=encoder_padding_mask,
                output_attentions=False,
                # incremental=incremental, incremental_cache=incremental_cache,
                # checkpointing=checkpointing_cross_attn,
                # lang=lang, atb=atb
            )
            x = self.dropout_module(x)
            x = self.residual_connection(x, residual)
            if not self.normalize_before:
                x = self.encoder_attn_layer_norm(x)

        # ---- position-wise feed-forward block ----
        residual = x
        if self.normalize_before:
            x = self.final_layer_norm(x)

        x = self.activation_fn(self.fc1(x))
        x = self.activation_dropout_module(x)
        x = self.fc2(x)
        x = self.dropout_module(x)
        x = self.residual_connection(x, residual)
        if not self.normalize_before:
            x = self.final_layer_norm(x)

        return x, attn, None
| 14,593 | 35.303483 | 95 | py |
NMTGMinor | NMTGMinor-master/onmt/models/deltalm/modules/activation_functions.py | import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import Parameter
import math
from typing import Dict, Optional, Tuple
import torch
def gelu_accurate(x):
    """Tanh-based GELU approximation (Hendrycks & Gimpel).

    The constant sqrt(2/pi) is computed once on first call and cached as a
    function attribute.
    """
    if not hasattr(gelu_accurate, "_a"):
        gelu_accurate._a = math.sqrt(2 / math.pi)
    inner = gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3))
    return 0.5 * x * (1 + torch.tanh(inner))
def gelu(x: torch.Tensor) -> torch.Tensor:
    """Exact GELU, evaluated in float32 for stability and cast back to x's dtype."""
    activated = torch.nn.functional.gelu(x.float())
    return activated.type_as(x)
NMTGMinor | NMTGMinor-master/onmt/models/deltalm/modules/layer_drop.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
LayerDrop as described in https://arxiv.org/abs/1909.11556.
"""
import torch
import torch.nn as nn
class LayerDropModuleList(nn.ModuleList):
    """A :class:`torch.nn.ModuleList` with LayerDrop (arXiv:1909.11556).

    Each time the list is iterated while in training mode, every layer is
    kept independently with probability ``1 - p``; in evaluation mode all
    layers are always yielded. The kept subset is re-sampled on every
    iteration of the list.

    Usage::

        layers = LayerDropModuleList(p=0.5, modules=[layer1, layer2, layer3])
        for layer in layers:  # may visit any subset during training
            x = layer(x)

    Args:
        p (float): probability of dropping each layer during training
        modules (iterable, optional): initial modules for the list
    """

    def __init__(self, p, modules=None):
        super().__init__(modules)
        self.p = p

    def __iter__(self):
        # One uniform draw per layer; a layer survives when its draw exceeds
        # the drop probability (or always, outside of training).
        survival_draws = torch.empty(len(self)).uniform_()
        for idx, module in enumerate(super().__iter__()):
            if (not self.training) or survival_draws[idx] > self.p:
                yield module
NMTGMinor | NMTGMinor-master/onmt/reversible_models/reversible.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PrePostProcessing
from onmt.modules.linear import FeedForward
from onmt.modules.attention import MultiHeadAttention
from torch.autograd.function import Function
import sys
from torch.utils.checkpoint import get_device_states, set_device_states
class ReversibleTransformerEncoder(nn.Module):
    """Reversible Transformer encoder building block: pre/post-processing
    layer norms, a shared multi-head attention and a feed-forward module.

    Args:
        opt: model options namespace (model_size, dropout, n_heads,
            attn_dropout, inner_size, variational_dropout, ...)
        death_rate (float): stochastic-depth death rate for this block
    """

    def __init__(self, opt, death_rate=0.0):
        # Call nn.Module.__init__ first: the original assigned plain
        # attributes before super().__init__(), which happens to work for
        # non-module values but is fragile and non-idiomatic.
        super().__init__()

        self.variational = opt.variational_dropout
        self.death_rate = death_rate
        self.dropout = opt.dropout

        d_model = opt.model_size
        p = opt.dropout
        h = opt.n_heads
        attn_p = opt.attn_dropout
        # (removed unused local `n_layers = opt.layers`)

        self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
        self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
        self.multihead = MultiHeadAttention(h, d_model, attn_p=attn_p, share=2)

        ff_p = opt.dropout
        self.feedforward = FeedForward(opt.model_size, opt.inner_size, ff_p, variational=self.variational)
NMTGMinor | NMTGMinor-master/onmt/reversible_models/relative_transformers.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PrePostProcessing
# from onmt.modules.linear import FeedForward as position_wise_feed_forward
from onmt.modules.attention import MultiHeadAttention
# from onmt.modules.relative_attention import RelPartialLearnableMultiHeadAttn
from onmt.modules.optimized.relative_self_attention import RelativeSelfMultiheadAttn
from onmt.modules.optimized.encdec_attention import EncdecMultiheadAttn
from onmt.modules.optimized.feed_forward import PositionWiseFeedForward
from onmt.modules.layer_norm import LayerNorm
from torch.autograd.function import Function
import sys
from torch.utils.checkpoint import get_device_states, set_device_states
from onmt.modules.dropout import variational_dropout
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from ..modules.optimized.compat import custom_fwd, custom_bwd
try:
import apex.amp as amp
from apex.amp import half_function
except (ModuleNotFoundError, ImportError) as e:
amp = None
from ..modules.optimized.compat import half_function
def deterministic_dropout(input, p=0.5, training=True, seed=None):
    """Dropout whose mask is reproducible by fixing the global RNG seed.

    When *seed* is given, the global torch RNG is re-seeded before the mask
    is drawn, so repeated calls with the same seed yield identical masks.
    """
    if seed is None:
        return nn.functional.dropout(input, p=p, training=training)
    torch.manual_seed(seed)
    return nn.functional.dropout(input, p=p, training=training)
class RelativeSelfAttention(nn.Module):
    """Pre-norm relative self-attention sub-layer with residual dropout.

    Returns the dropped-out attention output together with the attention
    coverage; when ``cleaning`` is set, intermediate tensors are deleted
    eagerly to reduce peak memory.
    """

    def __init__(self, opt):
        super().__init__()
        self.layer_norm = LayerNorm((opt.model_size,), elementwise_affine=True)
        if opt.residual_dropout >= 0:
            self.residual_dropout = opt.residual_dropout
        else:
            self.residual_dropout = opt.dropout
        self.attn = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, dropout=opt.attn_dropout,
                                              learnable_pos=opt.learnable_pos)
        self.variational = opt.variational_dropout

    def forward(self, input, pos, attn_mask=None, incremental=False, incremental_cache=None, cleaning=False):
        normed = self.layer_norm(input)
        attn_out, coverage = self.attn(normed, pos, attn_mask, incremental=incremental,
                                       incremental_cache=incremental_cache)

        if self.variational:
            out = variational_dropout(attn_out, p=self.residual_dropout, inplace=False, training=self.training)
        else:
            out = F.dropout(attn_out, p=self.residual_dropout, training=self.training, inplace=False)

        if cleaning:
            del normed, attn_out
        return out, coverage
class FeedForward(nn.Module):
    """Pre-norm position-wise feed-forward sub-layer with residual dropout."""

    def __init__(self, opt):
        super().__init__()
        self.layer_norm = nn.LayerNorm((opt.model_size,), elementwise_affine=True)
        if opt.residual_dropout >= 0:
            self.residual_dropout = opt.residual_dropout
        else:
            self.residual_dropout = opt.dropout
        if opt.ffn_dropout >= 0:
            self.ffn_dropout = opt.ffn_dropout
        else:
            self.ffn_dropout = opt.dropout
        self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, self.ffn_dropout,
                                                   variational=opt.variational_dropout, glu=opt.ffn_glu,
                                                   activation=opt.ffn_activation)
        self.variational = opt.variational_dropout

    def forward(self, input, cleaning=False):
        normed = self.layer_norm(input)
        transformed = self.feedforward(normed)

        if self.variational:
            out = variational_dropout(transformed, p=self.residual_dropout, inplace=False, training=self.training)
        else:
            out = F.dropout(transformed, p=self.residual_dropout, training=self.training, inplace=False)

        if cleaning:
            del normed, transformed
        return out
class SourceAttention(nn.Module):
    """Pre-norm encoder-decoder (cross) attention sub-layer with residual dropout."""

    def __init__(self, opt):
        super().__init__()
        self.layer_norm = nn.LayerNorm((opt.model_size,), elementwise_affine=True)
        if opt.residual_dropout >= 0:
            self.residual_dropout = opt.residual_dropout
        else:
            self.residual_dropout = opt.dropout
        self.attn = EncdecMultiheadAttn(opt.n_heads, opt.model_size, attn_drop=opt.attn_dropout)
        self.dropout = opt.attn_dropout
        self.variational = opt.variational_dropout

    def forward(self, input, context, attn_mask=None, incremental=False, incremental_cache=None, cleaning=False):
        normed = self.layer_norm(input)
        attn_out, coverage = self.attn(normed, context, context, attn_mask, incremental, incremental_cache)

        if self.variational:
            out = variational_dropout(attn_out, p=self.residual_dropout, inplace=False, training=self.training)
        else:
            out = F.dropout(attn_out, p=self.residual_dropout, training=self.training, inplace=False)

        if cleaning:
            del normed, attn_out
        return out, coverage
class ReversibleEncoderFunction(Function):
    """Custom autograd Function implementing a reversible encoder stack.

    The forward pass runs under ``torch.no_grad()`` and stores only the last
    layer's two output streams; the backward pass reconstructs each layer's
    inputs from its outputs (RevNet-style) instead of keeping all
    intermediate activations.
    """

    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx, layers, hidden_states, pos, attn_mask):
        # Both reversible streams are initialized from the same input.
        first_input, second_input = hidden_states, hidden_states

        with torch.no_grad():
            for layer in layers:
                first_input, second_input = layer(
                    first_input, second_input, pos, attn_mask
                )

        # Only the final outputs are saved; intermediates are recomputed
        # during backward, which is the point of the reversible design.
        ctx.save_for_backward(first_input, second_input, pos)
        ctx.layers = layers
        ctx.attn_mask = attn_mask  # just in case attn_mask is None

        with torch.no_grad():
            output = first_input + second_input

        return output

    @staticmethod
    @custom_bwd
    def backward(ctx, grad_output):
        # The forward output is first + second, so the same upstream gradient
        # flows into both streams.
        # BUG FIX: the original unpacked `grad_output` (a single tensor) into
        # two names, which splits it along dim 0 instead of duplicating it.
        first_grad_output, second_grad_output = grad_output, grad_output

        first_output, second_output, pos = ctx.saved_tensors
        layers = ctx.layers
        attn_mask = ctx.attn_mask

        for layer in layers[::-1]:
            # BUG FIX: the reconstructed inputs must feed the next (earlier)
            # layer's backward pass; the original assigned them to unrelated
            # names and kept passing the last layer's outputs every iteration.
            first_output, second_output, first_grad_output, second_grad_output = layer.backward_pass(
                first_output, second_output, first_grad_output, second_grad_output, pos, attn_mask
            )

        grad_hidden_states = first_grad_output + second_grad_output

        # BUG FIX: forward takes (layers, hidden_states, pos, attn_mask)
        # after ctx; the original returned the gradient in the `layers` slot.
        # Only hidden_states receives a gradient (positions use no embedding
        # gradient here).
        return None, grad_hidden_states, None, None
@half_function
def reversible_encoder(layers, hidden_states, pos, attn_mask):
    """Run the reversible encoder stack.

    Thin wrapper around ``ReversibleEncoderFunction.apply``; the
    ``@half_function`` decorator comes from apex AMP (or its compat shim) --
    presumably it handles half-precision casting at this boundary.
    """
    return ReversibleEncoderFunction.apply(layers, hidden_states, pos, attn_mask)
class ReversibleTransformerEncoderLayer(nn.Module):
    """One reversible encoder layer:

        y1 = x1 + F(x2)    F: relative self-attention
        y2 = x2 + G(y1)    G: feed-forward

    ``backward_pass`` inverts these equations to reconstruct (x1, x2) from
    (y1, y2), replaying F and G under the RNG state captured during forward
    so that the dropout masks match exactly.
    """

    def __init__(self, opt, death_rate=0.0):
        super().__init__()
        self.self_attn = RelativeSelfAttention(opt)
        self.feedforward = FeedForward(opt)
        self.death_rate = death_rate
        self.forward_coin = True

    def _init_attention_seed(self, *args):
        """Capture CPU/GPU RNG state so the attention sub-layer's dropout is
        identical in forward and in the backward recomputation."""
        self.attn_cpu_state = torch.get_rng_state()
        self.attn_gpu_devices, self.attn_gpu_states = get_device_states(*args)

    def _init_feedforward_seed(self, *args):
        """Capture CPU/GPU RNG state for the feed-forward sub-layer (same
        purpose as :meth:`_init_attention_seed`)."""
        self.ffn_cpu_state = torch.get_rng_state()
        self.ffn_gpu_devices, self.ffn_gpu_states = get_device_states(*args)

    def forward(self, x1, x2, pos, attn_mask=None):
        """
        :param x1: first reversible stream
        :param x2: second reversible stream
        :param pos: relative position encodings
        :param attn_mask: attention mask
        :return: (y1, y2)
        """
        # Record RNG state before each stochastic sub-function so backward
        # can replay the same dropout masks.
        self._init_attention_seed(x2)
        z1, coverage = self.self_attn(x2, pos, attn_mask, cleaning=True)
        y1 = z1 + x1

        self._init_feedforward_seed(y1)
        z2 = self.feedforward(y1, cleaning=True)
        y2 = z2 + x2

        del x1, x2, z1, z2
        return y1, y2

    def backward_pass(self, y1, y2, dy1, dy2, pos, attn_mask=None):
        """Reconstruct (x1, x2) from (y1, y2) and backpropagate.

        :return: (x1, x2, dx1, dx2)
        """
        # Recompute G(y1) under the recorded RNG state, then backprop dy2.
        with torch.enable_grad():
            y1.requires_grad = True
            with torch.random.fork_rng(devices=self.ffn_gpu_devices, enabled=True):
                torch.set_rng_state(self.ffn_cpu_state)
                set_device_states(self.ffn_gpu_devices, self.ffn_gpu_states)
                z2 = self.feedforward(y1)
            torch.autograd.backward(z2, dy2)

        with torch.no_grad():
            # restore x2 = y2 - G(y1)
            x2 = y2 - z2
            del z2, y2
            # dx1 = dy1 + y1.grad
            dx1 = dy1 + y1.grad
            del dy1
            y1.grad = None

        # Recompute F(x2) under the recorded RNG state, then backprop dx1.
        with torch.enable_grad():
            x2.requires_grad = True
            with torch.random.fork_rng(devices=self.attn_gpu_devices, enabled=True):
                torch.set_rng_state(self.attn_cpu_state)
                set_device_states(self.attn_gpu_devices, self.attn_gpu_states)
                # BUG FIX: RelativeSelfAttention.forward returns
                # (output, coverage) -- two values, not three; the original
                # three-way unpack raised ValueError here.
                z1, _ = self.self_attn(x2, pos, attn_mask)
            z1.backward(dx1)

        with torch.no_grad():
            # restore x1 = y1 - F(x2)
            x1 = y1 - z1
            del y1, z1
            dx2 = dy2 + x2.grad
            x2.grad = None
            del dy2
            x2 = x2.detach()

        return x1, x2, dx1, dx2
class ReversibleDecoderFunction(Function):
    """Custom autograd Function for a reversible decoder stack.

    Like :class:`ReversibleEncoderFunction`, but each layer also attends to
    the encoder ``context``; the context gradient is accumulated manually
    across layers during the backward pass.
    """

    @staticmethod
    def forward(ctx, layers, hidden_states, pos, context, tgt_mask, src_mask,
                incremental=False, incremental_cache=None):
        # Both reversible streams are initialized from the same input.
        attn_output, hidden_states = hidden_states, hidden_states

        for layer in layers:
            attn_output, hidden_states, coverage, incremental_cache = layer(
                attn_output, hidden_states, pos, context, tgt_mask, src_mask,
                incremental=incremental, incremental_cache=incremental_cache
            )

        # Only the final outputs (plus context/pos) are saved; intermediate
        # activations are reconstructed during backward.
        ctx.save_for_backward(attn_output, hidden_states, context, pos)
        ctx.layers = layers
        ctx.src_mask = src_mask
        ctx.tgt_mask = tgt_mask

        with torch.no_grad():
            output = attn_output + hidden_states

        return output

    @staticmethod
    def backward(ctx, grad_hidden_states):
        # Output was attn_output + hidden_states, so the upstream gradient
        # flows into both streams unchanged.
        grad_attn_output = grad_hidden_states

        attn_output, hidden_states, context, pos = ctx.saved_tensors
        layers = ctx.layers
        src_mask = ctx.src_mask
        tgt_mask = ctx.tgt_mask

        grad_context = None  # accumulated manually across layers

        for layer in layers[::-1]:
            # context is detached per layer so its gradient can be summed
            # explicitly instead of flowing through autograd.
            attn_output, hidden_states, grad_attn_output, grad_hidden_states, grad_context_ = layer.backward_pass(
                attn_output, hidden_states, grad_attn_output, grad_hidden_states,
                pos, context.detach(), tgt_mask, src_mask
            )

            if grad_context is None:
                grad_context = grad_context_
            elif grad_context_ is not None:  # a skipped layer may return None
                grad_context.add_(grad_context_)
            del grad_context_

        grad_hidden_states = grad_attn_output + grad_hidden_states

        # BUG FIX: forward takes 8 inputs after ctx (layers, hidden_states,
        # pos, context, tgt_mask, src_mask, incremental, incremental_cache);
        # the original returned only 7 gradients and placed grad_context in
        # the `pos` slot.
        return None, grad_hidden_states, None, grad_context, None, None, None, None
@half_function
def reversible_decoder(layers, hidden_states, pos, context, tgt_mask, src_mask, incremental, incremental_cache):
    """Run the reversible decoder stack.

    Thin wrapper around ``ReversibleDecoderFunction.apply``; the
    ``@half_function`` decorator comes from apex AMP (or its compat shim) --
    presumably it handles half-precision casting at this boundary.
    """
    return ReversibleDecoderFunction.apply(layers, hidden_states, pos, context,
                                           tgt_mask, src_mask, incremental, incremental_cache)
class ReversibleTransformerDecoderLayer(nn.Module):
    """One reversible decoder layer built from four sub-functions:

        z1 = x1 + F(x2)    F: relative self-attention
        z2 = x2 + G(z1)    G: first feed-forward
        y1 = z1 + H(z2)    H: encoder-decoder (source) attention
        y2 = z2 + K(y1)    K: second feed-forward

    ``backward_pass`` inverts these equations to reconstruct (x1, x2) from
    (y1, y2), re-running each sub-function under the RNG state captured in
    forward so the dropout masks match exactly.
    """

    def __init__(self, opt, death_rate=0.0):
        super(ReversibleTransformerDecoderLayer, self).__init__()
        self.ignore_source = opt.ignore_source
        assert not self.ignore_source
        self.variational = opt.variational_dropout
        self.death_rate = death_rate
        self.dropout = opt.dropout

        self.self_attention = RelativeSelfAttention(opt)
        self.feed_forward_first = FeedForward(opt)

        if not self.ignore_source:
            self.src_attention = SourceAttention(opt)
            self.feed_forward_second = FeedForward(opt)

    def _init_src_attention_seed(self, *args):
        """Capture RNG state so source-attention dropout is reproducible in
        the backward recomputation."""
        self.src_attn_cpu_state = torch.get_rng_state()
        self.src_attn_gpu_devices, self.src_attn_gpu_states = get_device_states(*args)

    def _init_attention_seed(self, *args):
        """Capture RNG state for the self-attention sub-function."""
        self.attn_cpu_state = torch.get_rng_state()
        self.attn_gpu_devices, self.attn_gpu_states = get_device_states(*args)

    def _init_feedforward1_seed(self, *args):
        """Capture RNG state for the first feed-forward sub-function."""
        self.ffn1_cpu_state = torch.get_rng_state()
        self.ffn1_gpu_devices, self.ffn1_gpu_states = get_device_states(*args)

    def _init_feedforward2_seed(self, *args):
        """Capture RNG state for the second feed-forward sub-function."""
        self.ffn2_cpu_state = torch.get_rng_state()
        self.ffn2_gpu_devices, self.ffn2_gpu_states = get_device_states(*args)

    def forward(self, x1, x2, pos, context, mask_tgt, mask_src,
                incremental=False, incremental_cache=None, reuse_source=True):
        """
        :param pos: relative position encodings
        :param x1: first reversible stream
        :param x2: second reversible stream
        :param context: encoder output
        :param mask_tgt: target (causal) mask
        :param mask_src: source padding mask
        :param incremental: incremental decoding flag
        :param incremental_cache: cache for incremental decoding
        :param reuse_source: accepted for interface compatibility (unused)
        :return: (y1, y2, coverage, incremental_cache)
        """
        with torch.no_grad():
            # prepare the state for the first function (att > src->att)
            self._init_attention_seed(x2)
            # BUG FIX: RelativeSelfAttention.forward returns
            # (output, coverage) -- two values; the original unpacked three.
            f_x2, coverage = self.self_attention(x2, pos, mask_tgt,
                                                 incremental=incremental,
                                                 incremental_cache=incremental_cache,
                                                 cleaning=True)
            z1 = f_x2 + x1

            # NOTE(review): called without tensors, so no GPU RNG state is
            # captured for this sub-function -- confirm intended.
            self._init_feedforward1_seed()
            g_z1 = self.feed_forward_first(z1, cleaning=True)
            z2 = x2 + g_z1

            self._init_src_attention_seed()
            # BUG FIX: SourceAttention.forward also returns (output, coverage).
            h_z2, coverage = self.src_attention(z2, context, mask_src,
                                                incremental=incremental,
                                                incremental_cache=incremental_cache,
                                                cleaning=True)
            y1 = z1 + h_z2

            # prepare the state for the second function
            self._init_feedforward2_seed(y1)
            k_y1 = self.feed_forward_second(y1, cleaning=True)
            y2 = z2 + k_y1

        """return Y1 and Y2"""
        return y1, y2, coverage, incremental_cache

    def backward_pass(self, y1, y2, dy1, dy2, pos, context,
                      mask_tgt, mask_src,
                      incremental=False, incremental_cache=None, reuse_source=False):
        """Reconstruct (x1, x2) from (y1, y2) and backpropagate.

        :return: (x1, x2, dx1, dx2, grad_context) where grad_context is this
            layer's contribution to the encoder-output gradient.
        """
        # first block: recompute K (second feed-forward) and invert y2
        with torch.enable_grad():
            y1.requires_grad = True
            with torch.random.fork_rng(devices=self.ffn2_gpu_devices, enabled=True):
                torch.set_rng_state(self.ffn2_cpu_state)
                set_device_states(self.ffn2_gpu_devices, self.ffn2_gpu_states)
                k_y1 = self.feed_forward_second(y1)
            torch.autograd.backward(k_y1, dy2)  # accumulates into y1.grad

        with torch.no_grad():
            # restore z2 = y2 - K(y1)
            z2 = y2 - k_y1
            dz1 = dy1 + y1.grad
            del y2, k_y1, dy1
            y1.grad = None

        # second block: recompute H (source attention) and invert y1
        with torch.enable_grad():
            z2.requires_grad = True
            context.requires_grad = True
            with torch.random.fork_rng(devices=self.src_attn_gpu_devices, enabled=True):
                torch.set_rng_state(self.src_attn_cpu_state)
                set_device_states(self.src_attn_gpu_devices, self.src_attn_gpu_states)
                # BUG FIX: SourceAttention.forward returns two values.
                h_z2, _ = self.src_attention(z2, context, mask_src,
                                             incremental=incremental,
                                             incremental_cache=incremental_cache)
            torch.autograd.backward(h_z2, dz1)

        with torch.no_grad():
            z1 = y1 - h_z2
            del y1, h_z2
            dz2 = dy2 + z2.grad
            z2.grad = None
            del dy2
            grad_context = context.grad
            del context.grad

        # third block: recompute G (first feed-forward) and invert z2
        with torch.enable_grad():
            z1.requires_grad = True
            with torch.random.fork_rng(devices=self.ffn1_gpu_devices, enabled=True):
                torch.set_rng_state(self.ffn1_cpu_state)
                set_device_states(self.ffn1_gpu_devices, self.ffn1_gpu_states)
                # BUG FIX: forward used feed_forward_first here (and the RNG
                # state restored above is ffn1's); the original called
                # feed_forward_second and tuple-unpacked a single tensor.
                g_z1 = self.feed_forward_first(z1)
            torch.autograd.backward(g_z1, dz2)

        with torch.no_grad():
            x2 = z2 - g_z1
            del z2, g_z1
            dx1 = dz1 + z1.grad
            z1.grad = None
            del dz1

        # fourth block: recompute F (self-attention) and invert z1
        with torch.enable_grad():
            x2.requires_grad = True
            with torch.random.fork_rng(devices=self.attn_gpu_devices, enabled=True):
                torch.set_rng_state(self.attn_cpu_state)
                set_device_states(self.attn_gpu_devices, self.attn_gpu_states)
                # BUG FIX: RelativeSelfAttention.forward returns two values.
                f_x2, _ = self.self_attention(x2, pos, mask_tgt,
                                              incremental=incremental,
                                              incremental_cache=incremental_cache)
            torch.autograd.backward(f_x2, dx1)

        with torch.no_grad():
            x1 = z1 - f_x2
            del z1, f_x2
            dx2 = dz2 + x2.grad
            x2.grad = None
            del dz2
            x2 = x2.detach()

        return x1, x2, dx1, dx2, grad_context
| 22,745 | 35.865478 | 115 | py |
NMTGMinor | NMTGMinor-master/onmt/reversible_models/transformers.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PrePostProcessing
from onmt.modules.linear import FeedForward as position_wise_feed_forward
from onmt.modules.attention import MultiHeadAttention
from torch.autograd.function import Function
import sys
from torch.utils.checkpoint import get_device_states, set_device_states
from onmt.modules.dropout import variational_dropout
def deterministic_dropout(input, p=0.5, training=True, seed=None):
    """Dropout with an optional reproducible mask.

    Re-seeds the global torch RNG when *seed* is provided, making repeated
    calls with the same seed draw the same dropout mask.
    """
    if seed is None:
        return nn.functional.dropout(input, p=p, training=training)
    torch.manual_seed(seed)
    return nn.functional.dropout(input, p=p, training=training)
class SelfAttention(nn.Module):
    """Pre-norm self-attention sub-layer with residual dropout.

    Returns (output, coverage, incremental_cache).
    """

    def __init__(self, opt):
        super().__init__()
        self.layer_norm = nn.LayerNorm((opt.model_size,), elementwise_affine=True)
        self.attn = MultiHeadAttention(opt.n_heads, opt.model_size, attn_p=opt.attn_dropout, share=1)
        self.dropout = opt.attn_dropout
        self.variational = opt.variational_dropout

    def forward(self, input, attn_mask=None, incremental=False, incremental_cache=None, cleaning=False):
        normed = self.layer_norm(input)
        attn_out, coverage, incremental_cache = self.attn(normed, normed, normed, attn_mask,
                                                          incremental=incremental,
                                                          incremental_cache=incremental_cache)

        if self.variational:
            out = variational_dropout(attn_out, p=self.dropout, inplace=False, training=self.training)
        else:
            out = F.dropout(attn_out, p=self.dropout, training=self.training, inplace=False)

        if cleaning:
            del normed, attn_out
        return out, coverage, incremental_cache
class FeedForward(nn.Module):
    """Pre-norm position-wise feed-forward sub-layer with residual dropout."""

    def __init__(self, opt):
        super().__init__()
        self.layer_norm = nn.LayerNorm((opt.model_size, ), elementwise_affine=True)
        self.feedforward = position_wise_feed_forward(opt.model_size, opt.inner_size, opt.dropout,
                                                      variational=opt.variational_dropout)
        self.dropout = opt.dropout
        self.variational = opt.variational_dropout

    def forward(self, input, cleaning=False):
        normed = self.layer_norm(input)
        transformed = self.feedforward(normed)

        if self.variational:
            out = variational_dropout(transformed, p=self.dropout, inplace=False, training=self.training)
        else:
            out = F.dropout(transformed, p=self.dropout, training=self.training, inplace=False)

        if cleaning:
            del normed, transformed
        return out
class SourceAttention(nn.Module):
    """Pre-norm encoder-decoder (cross) attention sub-layer with residual dropout.

    Returns (output, coverage, incremental_cache).
    """

    def __init__(self, opt):
        super().__init__()
        self.layer_norm = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
        self.attn = MultiHeadAttention(opt.n_heads, opt.model_size, attn_p=opt.attn_dropout, share=2)
        self.dropout = opt.attn_dropout
        self.variational = opt.variational_dropout

    def forward(self, input, context, attn_mask=None, incremental=False, incremental_cache=None, cleaning=False):
        normed = self.layer_norm(input)
        attn_out, coverage, incremental_cache = self.attn(normed, context, context, attn_mask,
                                                          incremental, incremental_cache)

        if self.variational:
            out = variational_dropout(attn_out, p=self.dropout, inplace=False, training=self.training)
        else:
            out = F.dropout(attn_out, p=self.dropout, training=self.training, inplace=False)

        if cleaning:
            del normed, attn_out
        return out, coverage, incremental_cache
class ReversibleEncoderFunction(Function):
    """Custom autograd Function for the reversible encoder stack.

    The input carries both reversible streams concatenated on the last
    dimension; forward chunks it into (attn_output, hidden_states), runs the
    layers, and returns the sum of the two streams. Backward reconstructs
    each layer's inputs from its outputs instead of storing intermediate
    activations.
    """

    @staticmethod
    def forward(ctx, hidden_states, layers, attn_mask):
        # Split the concatenated input into the two reversible streams.
        attn_output, hidden_states = torch.chunk(hidden_states, 2, dim=-1)

        for layer in layers:
            # forward pass in the layer
            attn_output, hidden_states = layer(
                attn_output, hidden_states, attn_mask
            )

        # attach params to ctx for backward
        # why should we detach here? because Y1 Y2 were built within torch.no_grad()
        # so cutting the backward from these variables seems unnecessary
        # save_for_backward will release memory more efficiently
        ctx.save_for_backward(attn_output.detach(), hidden_states.detach())
        # ctx.save_for_backward(attn_output, hidden_states)
        ctx.layers = layers
        ctx.attn_mask = attn_mask

        with torch.no_grad():
            output = attn_output + hidden_states

        return output

    @staticmethod
    def backward(ctx, grad_hidden_states):
        # The forward output is the sum of both streams, so the upstream
        # gradient flows into each stream unchanged.
        grad_attn_output = grad_hidden_states

        # retrieve params from ctx
        attn_output, hidden_states = ctx.saved_tensors
        layers = ctx.layers
        attn_mask = ctx.attn_mask

        for idx, layer in enumerate(layers[::-1]):
            # Reconstruct the layer's inputs and backprop through it.
            attn_output, hidden_states, grad_attn_output, grad_hidden_states = layer.backward_pass(
                attn_output, hidden_states, grad_attn_output, grad_hidden_states, attn_mask
            )

        # Gradient w.r.t. the concatenated input (both streams).
        grad_hidden_states = torch.cat([grad_attn_output, grad_hidden_states], dim=-1)

        # Gradients for (hidden_states, layers, attn_mask).
        return grad_hidden_states, None, None
class ReversibleTransformerEncoderLayer(nn.Module):
    """One reversible encoder layer: y1 = x1 + F(x2), y2 = x2 + G(y1), with
    F = self-attention and G = feed-forward.

    ``backward_pass`` inverts these equations to reconstruct (x1, x2) from
    (y1, y2), replaying F and G under the RNG state captured in forward so
    the dropout masks match exactly.
    """

    def __init__(self, opt, death_rate=0.0):
        super().__init__()
        self.self_attn = SelfAttention(opt)
        self.feedforward = FeedForward(opt)
        self.death_rate = death_rate  # stochastic-depth rate (not used in this class body)
        self.forward_coin = True

    def _init_attention_seed(self, *args):
        """
        This function sets a new seed for the
        attention layer to make dropout deterministic
        for both forward calls: 1 normal forward
        call and 1 forward call in backward
        to recalculate activations.
        """
        self.attn_cpu_state = torch.get_rng_state()
        self.attn_gpu_devices, self.attn_gpu_states = get_device_states(*args)

    def _init_feedforward_seed(self, *args):
        """
        This function sets a new seed for the
        feed forward layer to make dropout deterministic
        for both forward calls: 1 normal forward
        call and 1 forward call in backward
        to recalculate activations.
        """
        self.ffn_cpu_state = torch.get_rng_state()
        self.ffn_gpu_devices, self.ffn_gpu_states = get_device_states(*args)

    def forward(self, x1, x2, attn_mask=None):
        """
        :param x1: first reversible stream
        :param x2: second reversible stream
        :param attn_mask: attention mask
        :return: (y1, y2)
        """
        with torch.no_grad():
            # every forward pass we sample a different seed
            # for dropout and save for forward fn in backward pass
            # to have correct dropout
            self._init_attention_seed(x2)
            z1, _, _ = self.self_attn(x2, attn_mask, cleaning=True)
            y1 = z1 + x1

            self._init_feedforward_seed(y1)
            z2 = self.feedforward(y1, cleaning=True)
            y2 = z2 + x2

            del x1, x2, z1, z2

        """return Y1 and Y2"""
        return y1, y2

    def backward_pass(self, y1, y2, dy1, dy2, attn_mask=None):
        """Implementation of the backward pass for reversible transformer encoder.

        Reconstructs (x1, x2) from (y1, y2) and backpropagates dy1/dy2;
        returns (x1, x2, dx1, dx2).
        """
        # Recompute G(y1) under the recorded RNG state, then backprop dy2.
        with torch.enable_grad():
            y1.requires_grad = True
            with torch.random.fork_rng(devices=self.ffn_gpu_devices, enabled=True):
                torch.set_rng_state(self.ffn_cpu_state)
                set_device_states(self.ffn_gpu_devices, self.ffn_gpu_states)
                z2 = self.feedforward(y1)
            # res_hidden_states.backward(grad_hidden_states, retain_graph=True)
            torch.autograd.backward(z2, dy2)

        with torch.no_grad():
            # restore X2 = Y2 - G(Y1)
            x2 = y2 - z2
            del z2, y2
            # DX1 = DY1 + Y1.grad
            dx1 = dy1 + y1.grad
            del dy1
            y1.grad = None

        # Recompute F(x2) under the recorded RNG state, then backprop dx1.
        with torch.enable_grad():
            x2.requires_grad = True
            with torch.random.fork_rng(devices=self.attn_gpu_devices, enabled=True):
                torch.set_rng_state(self.attn_cpu_state)
                set_device_states(self.attn_gpu_devices, self.attn_gpu_states)
                z1, _, _ = self.self_attn(x2, attn_mask)
            z1.backward(dx1)

        with torch.no_grad():
            # restore X1 = Y1 - F(X2)
            x1 = y1 - z1
            del y1, z1
            dx2 = dy2 + x2.grad
            x2.grad = None
            del dy2
            x2 = x2.detach()

        return x1, x2, dx1, dx2
class ReversibleDecoderFunction(Function):
    """Custom autograd Function for the reversible decoder stack.

    Like the encoder version, but each layer also attends to the encoder
    ``context``; the context gradient is accumulated manually across layers
    during the backward pass.
    """

    @staticmethod
    def forward(ctx, hidden_states, context, layers, tgt_mask, src_mask,
                incremental=False, incremental_cache=None):
        bsz, seq_len = hidden_states.shape[0], hidden_states.shape[1]
        B = bsz * seq_len  # NOTE(review): only used by the commented debug print below
        idx = 0
        # Split the concatenated input into the two reversible streams.
        attn_output, hidden_states = torch.chunk(hidden_states, 2, dim=-1)
        # print(attn_output.sum()/B, hidden_states.sum()/B)
        for layer in layers:
            idx = idx + 1
            # forward pass in the layer
            attn_output, hidden_states, coverage, incremental_cache = layer(
                attn_output, hidden_states, context, tgt_mask, src_mask,
                incremental=incremental, incremental_cache=incremental_cache
            )

        # attach params to ctx for backward
        # why should we detach here? because Y1 Y2 were built within torch.no_grad()
        # so cutting the backward from these variables seems unnecessary
        # save_for_backward will release memory more efficiently
        # detach() seems to be required especially for context ...
        ctx.save_for_backward(attn_output, hidden_states, context)
        ctx.layers = layers
        ctx.src_mask = src_mask
        ctx.tgt_mask = tgt_mask

        with torch.no_grad():
            output = attn_output + hidden_states

        return output

    @staticmethod
    def backward(ctx, grad_hidden_states):
        # The forward output is the sum of both streams, so the upstream
        # gradient flows into each stream unchanged.
        grad_attn_output = grad_hidden_states

        # retrieve params from ctx
        attn_output, hidden_states, context = ctx.saved_tensors
        layers = ctx.layers
        src_mask = ctx.src_mask
        tgt_mask = ctx.tgt_mask

        grad_context = None  # we need to sum up the gradients of the context manually

        for idx, layer in enumerate(layers[::-1]):
            # backprop
            """Note: Here for each layer we detach the context once because we need to consider it
            as a separate variable and then later accumulate the gradients"""
            attn_output, hidden_states, grad_attn_output, grad_hidden_states, grad_context_ = layer.backward_pass(
                attn_output, hidden_states, grad_attn_output, grad_hidden_states,
                context.detach(), tgt_mask, src_mask
            )

            if grad_context is None:
                grad_context = grad_context_
            elif grad_context_ is not None:  # prevent ignoring layer making this None
                grad_context.add_(grad_context_)
            del grad_context_

        # Gradient w.r.t. the concatenated input (both streams).
        grad_hidden_states = torch.cat([grad_attn_output, grad_hidden_states], dim=-1)

        # Gradients for (hidden_states, context, layers, tgt_mask, src_mask,
        # incremental, incremental_cache).
        return grad_hidden_states, grad_context, None, None, None, None, None
class ReversibleTransformerDecoderLayer(nn.Module):
    """One reversible decoder layer: F = self-attn (+ source-attn), G = feed-forward.

    Operates on two activation streams (X1, X2):
        Y1 = X1 + F(X2),  Y2 = X2 + G(Y1)
    CPU/GPU RNG states are snapshotted before each sub-layer so dropout can be
    replayed bit-exactly when activations are recomputed in backward_pass.
    """
    # def __init__(self, h, d_model, p, d_ff, attn_p=0.1, version=1.0, ignore_source=False,
    # variational=False, death_rate=0.0):
    def __init__(self, opt, death_rate=0.0):
        super(ReversibleTransformerDecoderLayer, self).__init__()
        self.ignore_source = opt.ignore_source
        # source attention is mandatory in this variant
        assert not self.ignore_source
        self.variational = opt.variational_dropout
        self.death_rate = death_rate
        self.dropout = opt.dropout
        self.self_attention = SelfAttention(opt)
        self.feed_forward = FeedForward(opt)
        if not self.ignore_source:
            self.src_attention = SourceAttention(opt)
    def _init_src_attention_seed(self, *args):
        """
        This function sets a new seed for the
        attention layer to make dropout deterministic
        for both forward calls: 1 normal forward
        call and 1 forward call in backward
        to recalculate activations.
        """
        self.src_attn_cpu_state = torch.get_rng_state()
        self.src_attn_gpu_devices, self.src_attn_gpu_states = get_device_states(*args)
    def _init_attention_seed(self, *args):
        """
        This function sets a new seed for the
        attention layer to make dropout deterministic
        for both forward calls: 1 normal forward
        call and 1 forward call in backward
        to recalculate activations.
        """
        # randomize seeds
        self.attn_cpu_state = torch.get_rng_state()
        self.attn_gpu_devices, self.attn_gpu_states = get_device_states(*args)
    def _init_feedforward_seed(self, *args):
        """
        This function sets a new seed for the
        feed forward layer to make dropout deterministic
        for both forward calls: 1 normal forward
        call and 1 forward call in backward
        to recalculate activations.
        """
        # randomize seeds
        self.ffn_cpu_state = torch.get_rng_state()
        self.ffn_gpu_devices, self.ffn_gpu_states = get_device_states(*args)
    def forward(self, x1, x2, context, mask_tgt, mask_src,
                incremental=False, incremental_cache=None, reuse_source=True):
        """
        :param x1: X1
        :param x2: X2
        :param context:
        :param mask_tgt:
        :param mask_src:
        :param incremental:
        :param incremental_cache:
        :param reuse_source:
        :return:
        """
        # if self.training:
        #     coin = (torch.rand(1)[0].item() >= self.death_rate)
        #
        #     self.forward_coin = coin
        # the whole forward runs without grad; backward_pass recomputes with grad
        with torch.no_grad():
            # print("x1", x1.sum() / (x1.size(0) * x2.size(1)))
            # print("x2", x2.sum() / (x2.size(0) * x2.size(1)))
            # prepare the state for the first function (att > src->att)
            self._init_attention_seed(x2)
            f_x2, coverage, incremental_cache = self.self_attention(x2, mask_tgt,
                                                                    incremental=incremental,
                                                                    incremental_cache=incremental_cache,
                                                                    cleaning=True)
            z = f_x2
            # print("self_attention", z.sum() / (z.size(0) * z.size(1)))
            # if not self.ignore_source:
            f_x2, coverage, incremental_cache = self.src_attention(f_x2, context, mask_src,
                                                                   incremental=incremental,
                                                                   incremental_cache=incremental_cache,
                                                                   cleaning=True)
            # print("src_attention", f_x2.sum() / (f_x2.size(0) * f_x2.size(1)))
            # residual inside F: F(X2) = src_attn(self_attn(X2)) + self_attn(X2)
            f_x2 = f_x2 + z
            del z
            # if self.training and self.death_rate > 0:
            #     f_x2 = f_x2 / (1 - self.death_rate)
            y1 = x1 + f_x2
            # del f_x2, x1
            # prepare the state for the second function
            self._init_feedforward_seed(y1)
            # print("y1", y1.sum() / (y1.size(0) * y1.size(1)))
            g_y1 = self.feed_forward(y1, cleaning=True)
            # if self.training and self.death_rate > 0:
            #     g_y1 = g_y1 / (1 - self.death_rate)
            y2 = x2 + g_y1
            # print("y2", y2.sum() / (y2.size(0) * y2.size(1)))
            del g_y1, x2
        """return Y1 and Y2"""
        return y1, y2, coverage, incremental_cache
    def backward_pass(self, y1, y2, dy1, dy2, context,
                      mask_tgt, mask_src,
                      incremental=False, incremental_cache=None, reuse_source=False):
        """
        :param y1
        :param y2
        :param dy1: dL/dX2
        :param dy2: dL/dY2
        :param context:
        :param mask_tgt:
        :param mask_src:
        :param incremental:
        :param incremental_cache:
        :param reuse_source:
        :return:
        """
        # if not self.forward_coin: # this layer was skipped, just return
        #     return y1, y2, dy1, dy2, None
        # first block: recompute the ffn transition function
        with torch.enable_grad():
            y1.requires_grad = True
            # replay the RNG state saved in forward so dropout masks match
            with torch.random.fork_rng(devices=self.ffn_gpu_devices, enabled=True):
                torch.set_rng_state(self.ffn_cpu_state)
                set_device_states(self.ffn_gpu_devices, self.ffn_gpu_states)
                g_y1 = self.feed_forward(y1)
            torch.autograd.backward(g_y1, dy2)
        with torch.no_grad():
            # restore X2 = Y2 - G(Y1)
            x2 = y2 - g_y1
            # DX1 = DY1 + Y1.grad
            dx1 = dy1 + y1.grad
            del y2, g_y1, dy1
            y1.grad = None
        # second block
        with torch.enable_grad():
            x2.requires_grad = True
            context.requires_grad = True
            with torch.random.fork_rng(devices=self.attn_gpu_devices, enabled=True):
                torch.set_rng_state(self.attn_cpu_state)
                set_device_states(self.attn_gpu_devices, self.attn_gpu_states)
                f_x2, coverage, incremental_cache = self.self_attention(x2, mask_tgt,
                                                                        incremental=incremental,
                                                                        incremental_cache=incremental_cache)
                z = f_x2
                # if not self.ignore_source:
                f_x2, _, _ = self.src_attention(f_x2, context, mask_src,
                                                incremental=incremental,
                                                incremental_cache=incremental_cache)
                f_x2 = f_x2 + z
            torch.autograd.backward(f_x2, dx1)
        with torch.no_grad():
            # restore X1 = Y1 - F(X2)
            x1 = y1 - f_x2
            del y1, f_x2
            dx2 = dy2 + x2.grad
            x2.grad = None
            del dy2
            x2 = x2.detach()
            # per-layer context gradient; caller accumulates across layers
            grad_context = context.grad
            del context.grad
        # # third block
        # with torch.enable_grad():
        #     x2.requires_grad = True
        #
        #     with torch.random.fork_rng(devices=self.attn_gpu_devices, enabled=True):
        #         torch.set_rng_state(self.attn_cpu_state)
        #         set_device_states(self.attn_gpu_devices, self.attn_gpu_states)
        #
        #         f_x2, _, _ = self.self_attention(x2, mask_tgt)
        #
        #         if self.training and self.death_rate > 0:
        #             f_x2 = f_x2 / (1 - self.death_rate)
        #
        #         torch.autograd.backward(f_x2, dz1)
        #
        # with torch.no_grad():
        #     # restore X1 = Y1 - F(X2)
        #     x1 = z1 - f_x2
        #
        #     dx1 = dz1
        #     dx2 = dy2 + x2.grad
        #     del z1, f_x2
        #
        #     x2.grad = None
        #     x2 = x2.detach()
        return x1, x2, dx1, dx2, grad_context
| 20,232 | 35.001779 | 117 | py |
NMTGMinor | NMTGMinor-master/onmt/reversible_models/transformers_testing2.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PrePostProcessing
from onmt.modules.linear import FeedForward as position_wise_feed_forward
from onmt.modules.attention import MultiHeadAttention
from torch.autograd.function import Function
import sys
from torch.utils.checkpoint import get_device_states, set_device_states
from onmt.modules.dropout import variational_dropout
def deterministic_dropout(input, p=0.5, training=True, seed=None):
    """Apply dropout to *input*; seeding the global RNG first (when *seed* is
    given) makes the dropout mask reproducible across calls."""
    if seed is None:
        return nn.functional.dropout(input, p=p, training=training)
    torch.manual_seed(seed)
    return nn.functional.dropout(input, p=p, training=training)
class SelfAttention(nn.Module):
    """Pre-norm multi-head self-attention sub-layer followed by dropout."""

    def __init__(self, opt):
        super().__init__()
        self.layer_norm = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
        self.attn = MultiHeadAttention(opt.n_heads, opt.model_size, attn_p=opt.attn_dropout, share=1)
        self.dropout = opt.attn_dropout
        self.variational = opt.variational_dropout

    def forward(self, input, attn_mask=None, incremental=False, incremental_cache=None, cleaning=False):
        # normalize first (pre-norm), then attend with q = k = v
        normed = self.layer_norm(input)
        attn_out, coverage, incremental_cache = self.attn(normed, normed, normed, attn_mask,
                                                          incremental=incremental,
                                                          incremental_cache=incremental_cache)
        # pick the dropout variant once, then apply it
        drop = variational_dropout if self.variational else F.dropout
        out = drop(attn_out, p=self.dropout, training=self.training, inplace=False)
        if cleaning:
            # drop intermediate references early to free memory
            del normed, attn_out
        return out, coverage, incremental_cache
class FeedForward(nn.Module):
    """Pre-norm position-wise feed-forward sub-layer followed by dropout."""

    def __init__(self, opt):
        super().__init__()
        self.layer_norm = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
        self.feedforward = position_wise_feed_forward(opt.model_size, opt.inner_size, opt.dropout,
                                                      variational=opt.variational_dropout)
        self.dropout = opt.dropout
        self.variational = opt.variational_dropout

    def forward(self, input, cleaning=False):
        # normalize first (pre-norm), then transform
        normed = self.layer_norm(input)
        transformed = self.feedforward(normed)
        # pick the dropout variant once, then apply it
        drop = variational_dropout if self.variational else F.dropout
        out = drop(transformed, p=self.dropout, training=self.training, inplace=False)
        if cleaning:
            # drop intermediate references early to free memory
            del normed, transformed
        return out
class SourceAttention(nn.Module):
    """Pre-norm encoder-decoder (source) attention sub-layer followed by dropout."""

    def __init__(self, opt):
        super().__init__()
        self.layer_norm = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
        self.attn = MultiHeadAttention(opt.n_heads, opt.model_size, attn_p=opt.attn_dropout, share=2)
        self.dropout = opt.attn_dropout
        self.variational = opt.variational_dropout

    def forward(self, input, context, attn_mask=None, incremental=False, incremental_cache=None, cleaning=False):
        # normalize the query stream; keys/values come from the encoder context
        query = self.layer_norm(input)
        attn_out, coverage, incremental_cache = self.attn(query, context, context, attn_mask,
                                                          incremental, incremental_cache)
        # pick the dropout variant once, then apply it
        drop = variational_dropout if self.variational else F.dropout
        out = drop(attn_out, p=self.dropout, training=self.training, inplace=False)
        if cleaning:
            # drop intermediate references early to free memory
            del query, attn_out
        return out, coverage, incremental_cache
class ReversibleEncoderFunction(Function):
    """Custom autograd Function for a stack of reversible encoder layers.

    Only the final layer outputs are saved; per-layer activations are
    reconstructed during backward via each layer's backward_pass.
    """
    @staticmethod
    def forward(ctx, hidden_states, layers, attn_mask):
        # split the doubled hidden state into the two revnet streams (X1, X2)
        attn_output, hidden_states = torch.chunk(hidden_states, 2, dim=-1)
        for layer in layers:
            # forward pass in the layer
            attn_output, hidden_states = layer(
                attn_output, hidden_states, attn_mask
            )
        # attach params to ctx for backward
        # why should we detach here? because Y1 Y2 were built within torch.no_grad()
        # so cutting the backward from these variables seems unnecessary
        # save_for_backward will release memory more efficiently
        ctx.save_for_backward(attn_output.detach(), hidden_states.detach())
        # ctx.save_for_backward(attn_output, hidden_states)
        ctx.layers = layers
        ctx.attn_mask = attn_mask
        with torch.no_grad():
            output = attn_output + hidden_states
        return output
        # concatenate 2 revnet outputs:
        # return torch.cat([attn_output, hidden_states], dim=-1)
    @staticmethod
    def backward(ctx, grad_hidden_states):
        # print(grad_hidden_states.sum())
        # grad_attn_output, grad_hidden_states = torch.chunk(grad_hidden_states, 2, dim=-1)
        # forward returned Y1 + Y2, so the incoming gradient feeds both streams
        grad_attn_output = grad_hidden_states
        # retrieve params from ctx
        attn_output, hidden_states = ctx.saved_tensors
        layers = ctx.layers
        attn_mask = ctx.attn_mask
        # walk the layers in reverse, reconstructing inputs and gradients
        for idx, layer in enumerate(layers[::-1]):
            # backprop
            attn_output, hidden_states, grad_attn_output, grad_hidden_states = layer.backward_pass(
                attn_output, hidden_states, grad_attn_output, grad_hidden_states, attn_mask
            )
        grad_hidden_states = torch.cat([grad_attn_output, grad_hidden_states], dim=-1)
        # one grad per forward argument (layers and attn_mask get None)
        return grad_hidden_states, None, None
class ReversibleTransformerEncoderLayer(nn.Module):
    """Reversible Transformer encoder layer with stochastic depth (layer drop).

    Keeps two activation streams: Y1 = X1 + Attn(X2), Y2 = X2 + FFN(Y1).
    CPU/GPU RNG states are snapshotted before each sub-layer so dropout can be
    replayed exactly when activations are recomputed in backward_pass.
    """

    def __init__(self, opt, death_rate=0.0):
        super().__init__()
        self.self_attn = SelfAttention(opt)
        self.feedforward = FeedForward(opt)
        self.death_rate = death_rate  # probability of skipping this layer during training
        self.forward_coin = True

    def _init_attention_seed(self, *args):
        """
        This function sets a new seed for the
        attention layer to make dropout deterministic
        for both forward calls: 1 normal forward
        call and 1 forward call in backward
        to recalculate activations.
        """
        self.attn_cpu_state = torch.get_rng_state()
        self.attn_gpu_devices, self.attn_gpu_states = get_device_states(*args)

    def _init_feedforward_seed(self, *args):
        """
        This function sets a new seed for the
        feed forward layer to make dropout deterministic
        for both forward calls: 1 normal forward
        call and 1 forward call in backward
        to recalculate activations.
        """
        self.ffn_cpu_state = torch.get_rng_state()
        self.ffn_gpu_devices, self.ffn_gpu_states = get_device_states(*args)

    def forward(self, x1, x2, attn_mask=None):
        """
        :param x2:
        :param x1:
        :param attn_mask:
        :return: (y1, y2) the two output streams
        """
        with torch.no_grad():
            # every forward pass we sample a different seed
            # for dropout and save for forward fn in backward pass
            # to have correct dropout
            coin = True
            # fix: the original duplicated this condition ("if self.training:" twice)
            if self.training:
                coin = (torch.rand(1)[0].item() >= self.death_rate)
            self.forward_coin = coin
            if coin:
                self._init_attention_seed(x2)
                z1, _, _ = self.self_attn(x2, attn_mask, cleaning=True)
                if self.training and self.death_rate > 0:
                    # rescale to keep expectation constant under layer drop
                    z1 = z1 / (1 - self.death_rate)
                y1 = z1 + x1
                self._init_feedforward_seed(y1)
                z2 = self.feedforward(y1, cleaning=True)
                if self.training and self.death_rate > 0:
                    z2 = z2 / (1 - self.death_rate)
                y2 = z2 + x2
                del x1, x2, z1, z2
            else:
                # layer skipped: identity mapping on both streams
                y1 = x1
                y2 = x2
        """return Y1 and Y2"""
        return y1, y2

    def backward_pass(self, y1, y2, dy1, dy2, attn_mask=None):
        """
        :param y1:
        :param y2:
        :param dy1:
        :param dy2:
        :param attn_mask:
        :return: (x1, x2, dx1, dx2) reconstructed inputs and their gradients
        """
        """Implementation of the backward pass for reversible transformer encoder"""
        if not self.forward_coin:  # this layer was skipped, just return
            return y1, y2, dy1, dy2
        with torch.enable_grad():
            y1.requires_grad = True
            # replay the RNG state saved in forward so dropout masks match
            with torch.random.fork_rng(devices=self.ffn_gpu_devices, enabled=True):
                torch.set_rng_state(self.ffn_cpu_state)
                set_device_states(self.ffn_gpu_devices, self.ffn_gpu_states)
                z2 = self.feedforward(y1)
                if self.training and self.death_rate > 0:
                    z2 = z2 / (1 - self.death_rate)
            # res_hidden_states.backward(grad_hidden_states, retain_graph=True)
            torch.autograd.backward(z2, dy2)
        with torch.no_grad():
            # restore X2 = Y2 - G(Y1)
            x2 = y2 - z2
            del z2, y2
            # DX1 = DY1 + Y1.grad
            dx1 = dy1 + y1.grad
            del dy1
            y1.grad = None
        with torch.enable_grad():
            x2.requires_grad = True
            with torch.random.fork_rng(devices=self.attn_gpu_devices, enabled=True):
                torch.set_rng_state(self.attn_cpu_state)
                set_device_states(self.attn_gpu_devices, self.attn_gpu_states)
                z1, _, _ = self.self_attn(x2, attn_mask)
                if self.training and self.death_rate > 0:
                    z1 = z1 / (1 - self.death_rate)
            z1.backward(dx1)
        with torch.no_grad():
            # restore X1 = Y1 - F(X2)
            x1 = y1 - z1
            del y1, z1
            dx2 = dy2 + x2.grad
            x2.grad = None
            del dy2
            x2 = x2.detach()
        return x1, x2, dx1, dx2
class ReversibleDecoderFunction(Function):
    """Custom autograd Function for a stack of reversible decoder layers.

    Only the final layer outputs (plus the encoder context) are saved; the
    backward pass reconstructs activations via each layer's backward_pass.
    """
    @staticmethod
    def forward(ctx, hidden_states, context, layers, tgt_mask, src_mask,
                incremental=False, incremental_cache=None):
        # split the doubled hidden state into the two revnet streams (X1, X2)
        attn_output, hidden_states = torch.chunk(hidden_states, 2, dim=-1)
        for layer in layers:
            # forward pass in the layer
            attn_output, hidden_states, coverage, incremental_cache = layer(
                attn_output, hidden_states, context, tgt_mask, src_mask,
                incremental=incremental, incremental_cache=incremental_cache
            )
        # attach params to ctx for backward
        # why should we detach here? because Y1 Y2 were built within torch.no_grad()
        # so cutting the backward from these variables seems unnecessary
        # save_for_backward will release memory more efficiently
        # detach() seems to be required especially for context ...
        ctx.save_for_backward(attn_output, hidden_states, context)
        ctx.layers = layers
        ctx.src_mask = src_mask
        ctx.tgt_mask = tgt_mask
        with torch.no_grad():
            output = attn_output + hidden_states
        # concatenate 2 revnet outputs:
        return output
    @staticmethod
    def backward(ctx, grad_hidden_states):
        # We need three arguments because the forward pass returned 3 arguments
        # grad_attn_output, grad_hidden_states = torch.chunk(grad_hidden_states, 2, dim=-1)
        # forward returned Y1 + Y2, so the incoming gradient feeds both streams
        grad_attn_output = grad_hidden_states
        # retrieve params from ctx
        attn_output, hidden_states, context = ctx.saved_tensors
        layers = ctx.layers
        src_mask = ctx.src_mask
        tgt_mask = ctx.tgt_mask
        grad_context = None  # we need to sum up the gradients of the context manually
        for idx, layer in enumerate(layers[::-1]):
            # backprop
            """Note: Here for each layer we detach the context once because we need to consider it
            as a separate variable and then later accumulate the gradients"""
            attn_output, hidden_states, grad_attn_output, grad_hidden_states, grad_context_ = layer.backward_pass(
                attn_output, hidden_states, grad_attn_output, grad_hidden_states,
                context.detach(), tgt_mask, src_mask
            )
            # with torch.no_grad():
            if grad_context is None:
                grad_context = grad_context_
            elif grad_context_ is not None:  # prevent ignoring layer making this None
                grad_context.add_(grad_context_)
            del grad_context_
        grad_hidden_states = torch.cat([grad_attn_output, grad_hidden_states], dim=-1)
        # one grad per forward argument (non-tensor args get None)
        return grad_hidden_states, grad_context, None, None, None, None, None
class ReversibleTransformerDecoderLayer(nn.Module):
    """Reversible decoder layer with stochastic depth (layer drop).

    Two activation streams: Y1 = X1 + F(X2), Y2 = X2 + G(Y1), where
    F = self-attn followed by source-attn (with inner residual) and
    G = feed-forward. RNG states are snapshotted before each sub-layer so
    dropout can be replayed exactly during backward_pass recomputation.
    """
    # def __init__(self, h, d_model, p, d_ff, attn_p=0.1, version=1.0, ignore_source=False,
    # variational=False, death_rate=0.0):
    def __init__(self, opt, death_rate=0.0):
        super(ReversibleTransformerDecoderLayer, self).__init__()
        self.ignore_source = opt.ignore_source
        # source attention is mandatory in this variant
        assert not self.ignore_source
        self.variational = opt.variational_dropout
        self.death_rate = death_rate
        self.dropout = opt.dropout
        self.self_attention = SelfAttention(opt)
        self.feed_forward = FeedForward(opt)
        if not self.ignore_source:
            self.src_attention = SourceAttention(opt)
    def _init_src_attention_seed(self, *args):
        """
        This function sets a new seed for the
        attention layer to make dropout deterministic
        for both forward calls: 1 normal forward
        call and 1 forward call in backward
        to recalculate activations.
        """
        self.src_attn_cpu_state = torch.get_rng_state()
        self.src_attn_gpu_devices, self.src_attn_gpu_states = get_device_states(*args)
    def _init_attention_seed(self, *args):
        """
        This function sets a new seed for the
        attention layer to make dropout deterministic
        for both forward calls: 1 normal forward
        call and 1 forward call in backward
        to recalculate activations.
        """
        # randomize seeds
        self.attn_cpu_state = torch.get_rng_state()
        self.attn_gpu_devices, self.attn_gpu_states = get_device_states(*args)
    def _init_feedforward_seed(self, *args):
        """
        This function sets a new seed for the
        feed forward layer to make dropout deterministic
        for both forward calls: 1 normal forward
        call and 1 forward call in backward
        to recalculate activations.
        """
        # randomize seeds
        self.ffn_cpu_state = torch.get_rng_state()
        self.ffn_gpu_devices, self.ffn_gpu_states = get_device_states(*args)
    def forward(self, x1, x2, context, mask_tgt, mask_src,
                incremental=False, incremental_cache=None, reuse_source=True):
        """
        :param x1: X1
        :param x2: X2
        :param context:
        :param mask_tgt:
        :param mask_src:
        :param incremental:
        :param incremental_cache:
        :param reuse_source:
        :return:
        """
        # stochastic depth: flip a coin to decide whether this layer runs
        coin = True
        if self.training:
            coin = (torch.rand(1)[0].item() >= self.death_rate)
        self.forward_coin = coin
        if coin:
            # the whole forward runs without grad; backward_pass recomputes with grad
            with torch.no_grad():
                # prepare the state for the first function (att > src->att)
                self._init_attention_seed(x2)
                f_x2, coverage, incremental_cache = self.self_attention(x2, mask_tgt,
                                                                        incremental=incremental,
                                                                        incremental_cache=incremental_cache,
                                                                        cleaning=True)
                z = f_x2
                # if not self.ignore_source:
                f_x2, coverage, incremental_cache = self.src_attention(f_x2, context, mask_src,
                                                                       incremental=incremental,
                                                                       incremental_cache=incremental_cache,
                                                                       cleaning=True)
                # inner residual: F(X2) = src_attn(self_attn(X2)) + self_attn(X2)
                f_x2 = f_x2 + z
                del z
                if self.training and self.death_rate > 0:
                    # rescale to keep expectation constant under layer drop
                    f_x2 = f_x2 / (1 - self.death_rate)
                y1 = x1 + f_x2
                del f_x2, x1
                # prepare the state for the second function
                self._init_feedforward_seed(y1)
                g_y1 = self.feed_forward(y1, cleaning=True)
                if self.training and self.death_rate > 0:
                    g_y1 = g_y1 / (1 - self.death_rate)
                y2 = x2 + g_y1
                del g_y1, x2
        else:
            # layer skipped: identity on both streams, no coverage available
            y1, y2 = x1, x2
            coverage = None
        """return Y1 and Y2"""
        return y1, y2, coverage, incremental_cache
    def backward_pass(self, y1, y2, dy1, dy2, context,
                      mask_tgt, mask_src,
                      incremental=False, incremental_cache=None, reuse_source=False):
        """
        :param y1
        :param y2
        :param dy1: dL/dX2
        :param dy2: dL/dY2
        :param context:
        :param mask_tgt:
        :param mask_src:
        :param incremental:
        :param incremental_cache:
        :param reuse_source:
        :return:
        """
        if not self.forward_coin:  # this layer was skipped, just return
            return y1, y2, dy1, dy2, None
        # first block: recompute the ffn transition function
        with torch.enable_grad():
            y1.requires_grad = True
            # replay the RNG state saved in forward so dropout masks match
            with torch.random.fork_rng(devices=self.ffn_gpu_devices, enabled=True):
                torch.set_rng_state(self.ffn_cpu_state)
                set_device_states(self.ffn_gpu_devices, self.ffn_gpu_states)
                g_y1 = self.feed_forward(y1)
                if self.training and self.death_rate > 0:
                    g_y1 = g_y1 / (1 - self.death_rate)
            torch.autograd.backward(g_y1, dy2)
        with torch.no_grad():
            # restore X2 = Y2 - G(Y1)
            x2 = y2 - g_y1
            # DX1 = DY1 + Y1.grad
            dx1 = dy1 + y1.grad
            del y2, g_y1, dy1
            y1.grad = None
        # second block
        with torch.enable_grad():
            x2.requires_grad = True
            context.requires_grad = True
            with torch.random.fork_rng(devices=self.attn_gpu_devices, enabled=True):
                torch.set_rng_state(self.attn_cpu_state)
                set_device_states(self.attn_gpu_devices, self.attn_gpu_states)
                f_x2, coverage, incremental_cache = self.self_attention(x2, mask_tgt,
                                                                        incremental=incremental,
                                                                        incremental_cache=incremental_cache)
                z = f_x2
                # if not self.ignore_source:
                f_x2, _, _ = self.src_attention(f_x2, context, mask_src,
                                                incremental=incremental,
                                                incremental_cache=incremental_cache)
                f_x2 = f_x2 + z
                if self.training and self.death_rate > 0:
                    f_x2 = f_x2 / (1 - self.death_rate)
            torch.autograd.backward(f_x2, dx1)
        with torch.no_grad():
            # restore X1 = Y1 - F(X2)
            x1 = y1 - f_x2
            del y1, f_x2
            dx2 = dy2 + x2.grad
            x2.grad = None
            del dy2
            x2 = x2.detach()
            # per-layer context gradient; caller accumulates across layers
            grad_context = context.grad
            del context.grad
        # # third block
        # with torch.enable_grad():
        #     x2.requires_grad = True
        #
        #     with torch.random.fork_rng(devices=self.attn_gpu_devices, enabled=True):
        #         torch.set_rng_state(self.attn_cpu_state)
        #         set_device_states(self.attn_gpu_devices, self.attn_gpu_states)
        #
        #         f_x2, _, _ = self.self_attention(x2, mask_tgt)
        #
        #         if self.training and self.death_rate > 0:
        #             f_x2 = f_x2 / (1 - self.death_rate)
        #
        #         torch.autograd.backward(f_x2, dz1)
        #
        # with torch.no_grad():
        #     # restore X1 = Y1 - F(X2)
        #     x1 = z1 - f_x2
        #
        #     dx1 = dz1
        #     dx2 = dy2 + x2.grad
        #     del z1, f_x2
        #
        #     x2.grad = None
        #     x2 = x2.detach()
        return x1, x2, dx1, dx2, grad_context
| 20,730 | 34.559177 | 117 | py |
NMTGMinor | NMTGMinor-master/onmt/speech/Augmenter.py | import math
import torch
from collections import defaultdict
import onmt
import random
class Augmenter(object):
    """
    Implementation of the "Spec Augmentation" method
    (Only vertical and horizontal masking)
    """

    def __init__(self, F=8, mf=2, T=64, max_t=0.2, mt=2,
                 input_size=40, concat=4):
        # F: max width of a frequency mask; mf: number of frequency masks
        # T: max width of a time mask; mt: number of time masks
        # max_t: cap on a time mask as a fraction of the utterance length
        self.F = F
        self.mf = mf
        self.T = T
        self.max_t = max_t
        self.mt = mt
        self.input_size = input_size
        self.concat = concat
        print("[INFO] Spec-Augmentation with input size %d F=%d, T=%d" % (self.input_size, F, T))

    def augment(self, tensor):
        """Return a spec-augmented copy of `tensor` ([time, features]); the input is left untouched."""
        feat_size = tensor.size(1)
        original_len = tensor.size(0)
        tensor = tensor.float()

        # Copy to a new storage because otherwise the input would be zeroed permanently
        tensor_ = tensor.view(-1, self.input_size).new(*tensor.size()).copy_(tensor)

        for _ in range(self.mf):
            # frequency masking (second dimension)
            f = int(random.uniform(0.0, self.F))
            # fix: the mask-start upper bound was hard-coded to 40 (the default
            # input_size) instead of using the configured feature count
            f_0 = int(random.uniform(0.0, self.input_size - f))
            tensor_[:, f_0:f_0 + f].zero_()

        for _ in range(self.mt):
            # time masking (first dimension)
            t = int(random.uniform(0.0, self.T))
            t = min(t, int(self.max_t * original_len))
            if original_len - t < 0:
                continue
            t_0 = int(random.uniform(0.0, original_len - t - 1))
            tensor_[t_0: t_0 + t].zero_()

        # reshaping back to downsampling
        tensor__ = tensor_.view(original_len, feat_size)
        return tensor__
| 1,821 | 26.19403 | 97 | py |
NMTGMinor | NMTGMinor-master/onmt/speech/ctc_loss.py | from distutils.version import LooseVersion
import numpy as np
import six
import torch
import torch.nn.functional as F
import onmt
class CTC(torch.nn.Module):
    """Connectionist Temporal Classification loss wrapper.

    Uses the builtin torch CTC loss; the warpctc backend is only honoured for
    torch < 1.7.0.
    """

    def __init__(self, vocab_size, hidden_size, dropout_rate,
                 ctc_type="builtin", reduce=True,
                 padding_idx=-1, blank_idx=0):
        super().__init__()
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size

        if padding_idx == -1:
            self.padding_idx = onmt.constants.PAD
        else:
            self.padding_idx = padding_idx

        if blank_idx == -1:
            self.blank_idx = onmt.constants.TGT_PAD
        else:
            self.blank_idx = blank_idx

        # why do we need dropout at ctc ?
        self.dropout_rate = dropout_rate

        # In case of Pytorch >= 1.7.0, CTC will be always builtin
        self.ctc_type = (
            ctc_type
            if LooseVersion(torch.__version__) < LooseVersion("1.7.0")
            else "builtin"
        )

        if ctc_type != self.ctc_type:
            import logging  # fix: `logging` was referenced without ever being imported
            logging.warning(f"CTC was set to {self.ctc_type} due to PyTorch version.")

        if self.ctc_type == "builtin":
            reduction_type = "sum" if reduce else "none"
            # NOTE(review): the blank index is hard-wired to onmt.constants.TGT_PAD
            # here, ignoring the self.blank_idx computed above — confirm which one
            # is actually intended before changing it.
            self.ctc_loss = torch.nn.CTCLoss(blank=onmt.constants.TGT_PAD, reduction=reduction_type, zero_infinity=True)
        elif self.ctc_type == "warpctc":
            import warpctc_pytorch as warp_ctc
            self.ctc_loss = warp_ctc.CTCLoss(size_average=False, length_average=False)
        else:
            raise ValueError(
                'ctc_type must be "builtin" or "warpctc": {}'.format(self.ctc_type)
            )
        self.ignore_id = -1
        self.reduce = reduce

    def compute_loss(self, logits, targets, ilen, olen):
        """Compute the CTC loss from raw (pre-softmax) logits.

        :param logits: [T, B, V] raw outputs
        :param targets: target label ids (batch-first for the builtin backend)
        :param ilen: per-sample input lengths
        :param olen: per-sample target lengths
        :return: summed (or per-sample, if reduce=False) CTC loss
        """
        if self.ctc_type == "builtin":
            log_probs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
            # Use the deterministic CuDNN implementation of CTC loss to avoid
            # [issue#17798](https://github.com/pytorch/pytorch/issues/17798)
            with torch.backends.cudnn.flags(deterministic=True):
                loss = self.ctc_loss(log_probs, targets, ilen, olen)
            return loss
        elif self.ctc_type == "warpctc":
            return self.ctc_loss(logits, targets, ilen, olen)
        else:
            raise NotImplementedError

    def forward(self, model_outputs, targets, **kwargs):
        """Compute CTC loss from encoder logits and a [T, B] target tensor."""
        # context logits: T x B x V
        # targets: T x B
        logits = model_outputs['encoder_logits']

        if 'wav2vec_padding_mask' in model_outputs:
            source_mask = model_outputs['wav2vec_padding_mask'].long()
        else:
            source_mask = model_outputs['src_mask'].long()

        # target mask should be T x B
        target_mask = targets.ne(self.padding_idx)
        target_lengths = target_mask.long().sum(0)

        # source mask should be B x 1 x T or B x T; mask is 1 at padded steps,
        # so lengths are the counts of zeros
        if source_mask.dim() == 3:
            input_lengths = (1 - source_mask).squeeze(1).sum(1)
        else:
            input_lengths = (1 - source_mask).sum(1)

        if self.ctc_type == 'builtin':
            # target is batch first
            targets = targets.transpose(0, 1)

        loss = self.compute_loss(logits, targets, input_lengths, target_lengths)

        return loss
| 3,645 | 30.162393 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/data/mmap_indexed_dataset.py | import os
import struct
import numpy as np
import torch
import torch.utils.data
from functools import lru_cache
def read_longs(f, n):
    """Read *n* int64 values from the binary stream *f* into a numpy array."""
    buffer = np.empty(n, dtype=np.int64)
    f.readinto(buffer)
    return buffer
def write_longs(f, a):
    """Write the sequence *a* to the binary stream *f* as raw int64 values."""
    f.write(np.asarray(a, dtype=np.int64).tobytes())
# Mapping from the integer dtype codes stored in the index-file header
# to the numpy dtypes used to interpret the binary data.
dtypes = {
    1: np.uint8,
    2: np.int8,
    3: np.int16,
    4: np.int32,
    5: np.int64,
    6: np.float32,
    7: np.double,
    8: np.uint16
}
def code(dtype):
    """Return the integer storage code for a numpy dtype.

    :param dtype: a numpy dtype class present in the `dtypes` table
    :raises ValueError: if the dtype has no registered code
    """
    # iterate items() instead of keys() to avoid a second dict lookup per entry
    for key, value in dtypes.items():
        if value == dtype:
            return key
    raise ValueError(dtype)
def index_file_path(prefix_path):
    """Return the path of the index (.idx) file for the given dataset prefix."""
    return "{}.idx".format(prefix_path)
def data_file_path(prefix_path):
    """Return the path of the data (.bin) file for the given dataset prefix."""
    return "{}.bin".format(prefix_path)
def _warmup_mmap_file(path):
with open(path, 'rb') as stream:
while stream.read(100 * 1024 * 1024):
pass
# class MMapIndexedDataset(torch.utils.data.Dataset):
class MMapIndexedDataset(object):
    """Memory-mapped indexed dataset.

    Data layout: a .bin file holding the raw concatenated items, and a .idx
    file holding the header (magic, version, dtype code, count) followed by
    per-item sizes (int32) and byte offsets (int64).
    """
    class Index(object):
        # header magic identifying the mmap index format
        _HDR_MAGIC = b'MMIDIDX\x00\x00'
        @classmethod
        def writer(cls, path, dtype):
            """Return a context manager that writes an index file for items of `dtype`."""
            class _Writer(object):
                def __enter__(self):
                    self._file = open(path, 'wb')
                    # header: magic, version (1), dtype code
                    self._file.write(cls._HDR_MAGIC)
                    self._file.write(struct.pack('<Q', 1))
                    self._file.write(struct.pack('<B', code(dtype)))
                    return self
                @staticmethod
                def _get_pointers(sizes):
                    # cumulative byte offsets of each item in the data file
                    dtype_size = dtype().itemsize
                    address = 0
                    pointers = []
                    for size in sizes:
                        pointers.append(address)
                        address += size * dtype_size
                    return pointers
                def write(self, sizes):
                    """Write the item count, sizes and byte offsets."""
                    pointers = self._get_pointers(sizes)
                    self._file.write(struct.pack('<Q', len(sizes)))
                    sizes = np.array(sizes, dtype=np.int32)
                    self._file.write(sizes.tobytes(order='C'))
                    del sizes
                    pointers = np.array(pointers, dtype=np.int64)
                    self._file.write(pointers.tobytes(order='C'))
                    del pointers
                def __exit__(self, exc_type, exc_val, exc_tb):
                    self._file.close()
            return _Writer()
        def __init__(self, path):
            with open(path, 'rb') as stream:
                magic_test = stream.read(9)
                assert self._HDR_MAGIC == magic_test, (
                    'Index file doesn\'t match expected format. '
                    'Make sure that --dataset-impl is configured properly.'
                )
                version = struct.unpack('<Q', stream.read(8))
                assert (1,) == version
                dtype_code, = struct.unpack('<B', stream.read(1))
                self._dtype = dtypes[dtype_code]
                self._dtype_size = self._dtype().itemsize
                self._len = struct.unpack('<Q', stream.read(8))[0]
                offset = stream.tell()
            _warmup_mmap_file(path)
            # sizes and pointers are read as zero-copy views into the mmap
            self._bin_buffer_mmap = np.memmap(path, mode='r', order='C')
            self._bin_buffer = memoryview(self._bin_buffer_mmap)
            self._sizes = np.frombuffer(self._bin_buffer, dtype=np.int32, count=self._len, offset=offset)
            self._pointers = np.frombuffer(self._bin_buffer, dtype=np.int64, count=self._len,
                                           offset=offset + self._sizes.nbytes)
        def __del__(self):
            self._bin_buffer_mmap._mmap.close()
            del self._bin_buffer_mmap
        @property
        def dtype(self):
            return self._dtype
        @property
        def sizes(self):
            return self._sizes
        # NOTE(review): lru_cache on an instance method keeps `self` alive for the
        # cache's lifetime — presumably acceptable here; confirm if datasets churn.
        @lru_cache(maxsize=8)
        def __getitem__(self, i):
            # returns (byte offset, element count) of item i
            return self._pointers[i], self._sizes[i]
        def __len__(self):
            return self._len
    def __init__(self, path):
        super().__init__()
        self._path = None
        self._index = None
        self._bin_buffer = None
        self._do_init(path)
    def __getstate__(self):
        # pickle only the path; buffers are re-opened on unpickle
        return self._path
    def __setstate__(self, state):
        self._do_init(state)
    def _do_init(self, path):
        self._path = path
        self._index = self.Index(index_file_path(self._path))
        _warmup_mmap_file(data_file_path(self._path))
        self._bin_buffer_mmap = np.memmap(data_file_path(self._path), mode='r', order='C')
        self._bin_buffer = memoryview(self._bin_buffer_mmap)
    def __del__(self):
        self._bin_buffer_mmap._mmap.close()
        del self._bin_buffer_mmap
        del self._index
    def __len__(self):
        return len(self._index)
    @lru_cache(maxsize=8)
    def __getitem__(self, i):
        # slice item i out of the mmap and convert to an int64 torch tensor
        ptr, size = self._index[i]
        np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr)
        if self._index.dtype != np.int64:
            np_array = np_array.astype(np.int64)
        # return torch.from_numpy(np_array)
        # to avoid the warning
        return torch.from_numpy(np.array(np_array))
    @property
    def sizes(self):
        return self._index.sizes
    @property
    def supports_prefetch(self):
        return False
    @staticmethod
    def exists(path):
        """True when both the .idx and .bin files exist for the given prefix."""
        return (
            os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
        )
class MMapIndexedDatasetBuilder(object):
    """Incrementally writes items to a .bin file and finalizes a matching .idx index."""

    def __init__(self, out_file, dtype=np.int32):
        self._data_file = open(out_file, 'wb')
        self._dtype = dtype
        self._sizes = []

    def add_item(self, tensor):
        """Append one torch tensor or numpy array to the data file, recording its element count."""
        if isinstance(tensor, torch.Tensor):
            np_array = np.array(tensor.numpy(), dtype=self._dtype)
        else:
            np_array = tensor.astype(self._dtype)
        self._data_file.write(np_array.tobytes(order='C'))
        self._sizes.append(np_array.size)

    def merge_file_(self, another_file):
        """Append another dataset's data file and item sizes to this builder."""
        import shutil  # fix: shutil was used below but never imported at module level
        # Concatenate index
        index = MMapIndexedDataset.Index(index_file_path(another_file))
        assert index.dtype == self._dtype
        for size in index.sizes:
            self._sizes.append(size)
        # Concatenate data
        with open(data_file_path(another_file), 'rb') as f:
            shutil.copyfileobj(f, self._data_file)

    def finalize(self, index_file):
        """Close the data file and write the index with the accumulated sizes."""
        self._data_file.close()
        with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:
            index.write(self._sizes)
NMTGMinor | NMTGMinor-master/onmt/data/wav_dataset.py | import torch
import torchaudio as taudio
from functools import lru_cache
from onmt.utils import safe_readaudio
import numpy as np
import soundfile
import math
import torchaudio
import os
# this function reads wav file based on the timestamp in seconds
def safe_readaudio_from_cache(file_, wav_path, start=0.0, end=0.0, sample_rate=16000):
    """Read the [start, end] segment (in seconds) from an open SoundFile.

    :param file_: an open soundfile.SoundFile, or None to open ``wav_path`` here
    :param wav_path: path of the wav file (used only when ``file_`` is None)
    :param start: segment start in seconds
    :param end: segment end in seconds; end <= start means "read to EOF"
    :param sample_rate: sampling rate used to convert seconds to frames
    :return: float32 tensor of shape [length, 1] (first channel only, mono)
    """
    offset = math.floor(sample_rate * start)
    # -1 tells soundfile to read until the end of the file
    num_frames = -1 if end <= start else math.ceil(sample_rate * (end - start))
    # BUGFIX: the file_ is None branch previously referenced `tensor` before
    # assignment (NameError); fall back to opening wav_path, which was passed
    # but never used.
    opened_here = file_ is None
    if opened_here:
        file_ = soundfile.SoundFile(wav_path, 'r')
    try:
        dtype = "float32"
        frames = file_._prepare_read(offset, None, num_frames)
        waveform = file_.read(frames, dtype, always_2d=True)
        tensor = torch.from_numpy(waveform)
        # keep only the first channel; shape [length, 1] as wav2vec expects
        tensor = tensor[:, 0].unsqueeze(1)
    finally:
        if opened_here:
            file_.close()
    return tensor
class WavDataset(torch.utils.data.Dataset):
    """Lazily reads wav segments from disk.

    Each entry of ``wav_path_list`` is a tuple (wav_path, start, end,
    sample_rate).  Many utterances typically share the same wav file, so an
    optional cache of open SoundFile handles (evicted by usage count) can be
    kept to avoid re-opening files.
    """

    def __init__(self, wav_path_list, cache_size=0):
        """
        :param wav_path_list: list of (wav_path, start, end, sample_rate) tuples
        :param cache_size: max number of SoundFile handles kept open (0 disables caching)
        """
        self.wav_path_list = wav_path_list
        self._sizes = len(self.wav_path_list)
        self._dtype = torch.float32
        if cache_size > 0:
            self.cache = dict()   # wav_path -> open SoundFile handle
            self.usage = dict()   # wav_path -> access count (eviction heuristic)
        else:
            self.cache = None
            self.usage = dict()   # always present so flush_cache() is safe
        self.cache_size = cache_size

    def flush_cache(self):
        """Close every cached file handle and reset the cache."""
        if self.cache is not None:
            for wav_path in self.cache:
                self.cache[wav_path].close()
            self.cache = dict()
            self.usage = dict()

    @property
    def dtype(self):
        # data is always returned as float32
        return self._dtype

    @property
    def sizes(self):
        return self._sizes

    def __len__(self):
        return self._sizes

    def __getitem__(self, i):
        wav_info = self.wav_path_list[i]
        # each entry is a tuple (wav_file, start, end, sample_rate)
        wav_path, start, end, sample_rate = wav_info
        # many utterances share the same wav file -> keep the handle in memory
        if self.cache is not None:
            if wav_path in self.cache:
                # reuse the cached handle and bump its usage count
                file_ = self.cache[wav_path]
                self.usage[wav_path] = self.usage[wav_path] + 1
            else:
                try:
                    file_ = soundfile.SoundFile(wav_path, 'r')
                except RuntimeError as e:
                    print("Wavpath invalid:", wav_path, os.path.exists(wav_path))
                    raise e
                if len(self.cache) > self.cache_size:
                    # evict the least-used entry
                    min_key = min(self.usage, key=self.usage.get)
                    # BUGFIX: this guard used to compare the path key against
                    # the SoundFile object (`min_key != file_`, always true);
                    # compare paths so we never close the handle being added.
                    if min_key != wav_path:
                        self.cache[min_key].close()
                    self.cache.pop(min_key, None)
                    self.usage.pop(min_key, None)
                # register the new handle
                self.cache[wav_path] = file_
                self.usage[wav_path] = 1
            data = safe_readaudio_from_cache(file_, wav_path, start, end, sample_rate)
        else:
            data = safe_readaudio(wav_path, start, end, sample_rate)
        return data
# --- onmt/data/multistream_dataset.py ---
from __future__ import division
import math
import torch
import torch.utils.data
from collections import defaultdict
import onmt
from onmt.speech.Augmenter import Augmenter
from onmt.modules.dropout import switchout
"""
Data management for stream-to-stream models
Two basic classes:
- Batch stores the input / output sequences, grouped into tensors with the same length (by padding)
- Dataset stores all of the data and
"""
class Stream(object):
    """One mini-batch of a document stream.

    Unlike a regular padded batch, the sequences are concatenated
    back-to-back along the time axis into a single tensor of batch size 1
    (see collate()).
    """
    # An object to manage the data within a stream
    def __init__(self, src_data, tgt_data=None,
                 src_lang_data=None, tgt_lang_data=None,
                 src_type='text',
                 length_multiplier=1,
                 augmenter=None, upsampling=False,
                 **kwargs):
        """
        :param src_data: list of source tensors
        :param tgt_data: list of target tensors
        :param src_lang_data: list of language features for the source (TB finished)
        :param tgt_lang_data: list of language features for the target (TB finished)
        :param src_type: text or audio
        :param reshape_speech: the number of frames to be reshaped
        :param augmenter: using augmentation for speech
        :param merge: if the two sequences are going to be merged for Relative Transformer
        """
        # missing keys default to None, so get() never raises
        self.tensors = defaultdict(lambda: None)
        self.has_target = False
        self.src_type = src_type
        # self.upsampling = upsampling
        # self.feature_size = kwargs.get('feature_size', 40)
        self.length_mutliplier = length_multiplier
        if src_data is not None:
            self.tensors['source'], self.tensors['source_pos'], self.src_lengths = \
                self.collate(src_data,
                             type=self.src_type,
                             augmenter=augmenter)
            self.tensors['src_length'] = self.src_lengths
            self.src_size = sum(self.src_lengths)
        else:
            self.src_size = 0
        if tgt_data is not None:
            # NOTE(review): the results of this first collate() call are
            # immediately superseded by the target=True call below, so it
            # appears to be wasted work -- confirm it can be dropped.
            target_full, target_pos, self.tgt_lengths = self.collate(tgt_data)
            # self.tensors['target'] = target_full
            # self.tensors['target_input'] = target_full[:-1]
            # the last sentence has one element (eos) missing
            # self.tgt_lengths[-1] = self.tgt_lengths[-1] - 1
            # self.tensors['target_output'] = target_full[1:]
            # self.tensors['target_pos'] = target_pos[:-1]
            self.tensors['target_input'], self.tensors['target_output'], \
            self.tensors['target_pos'], self.tgt_lengths = self.collate(tgt_data, target=True)
            self.tensors['tgt_mask'] = self.tensors['target_output'].ne(onmt.constants.PAD)
            self.has_target = True
            # -1 per sentence: BOS is input-only, not a predicted token
            self.tgt_size = sum([len(x) - 1 for x in tgt_data])
        else:
            self.tgt_size = 0
        self.size = len(src_data) if src_data is not None else len(tgt_data)
        if src_lang_data is not None:
            self.tensors['source_lang'] = torch.cat(src_lang_data).long()
        if tgt_lang_data is not None:
            self.tensors['target_lang'] = torch.cat(tgt_lang_data).long()

    def switchout(self, swrate, src_vocab_size, tgt_vocab_size):
        # Switch out function ... currently works with only source text data
        # NOTE(review): self.tensors['target'] is never populated in __init__
        # (the assignment is commented out there), so the has_target branch
        # below would call switchout(None, ...) -- verify before enabling.
        if self.src_type == 'text':
            self.tensors['source'] = switchout(self.tensors['source'], src_vocab_size, swrate, transpose=True)
            if self.has_target:
                self.tensors['target'] = switchout(self.tensors['target'], tgt_vocab_size, swrate, transpose=True, offset=1)
                target_full = self.tensors['target']
                self.tensors['target_input'] = target_full[:-1]
                self.tensors['target_output'] = target_full[1:]
                self.tensors['tgt_mask'] = self.tensors['target_output'].ne(onmt.constants.PAD)

    # down sampling the speech signal by simply concatenating n features (reshaping)
    def downsample(self, data):
        # NOTE(review): self.reshape_speech is never assigned in __init__
        # (only a commented-out line exists), so calling this would raise
        # AttributeError; looks like dead code inherited from Dataset.
        if self.reshape_speech == 0:
            return data
        else:
            concat = self.reshape_speech
            tensor_ = data.float()  # adding float because of fp16 data storage
            add = (concat - tensor_.size()[0] % concat) % concat
            z = torch.FloatTensor(add, tensor_.size()[1]).zero_()
            # adding an additional dimension as padding
            tensor_ = torch.cat((tensor_, z), 0)
            tensor_ = tensor_.reshape((int(tensor_.size()[0] / concat), tensor_.size()[1] * concat))
            return tensor_

    def augment_speech(self):
        # placeholder: spec-augment is not implemented for streams
        return

    def collate(self, data, type="text", augmenter=None, target=False):
        """
        Assembling the individual sequences into one single tensor, included padding
        :param target:
        :param data: the list of sequences in chronological order
        :param type: text or audio
        :param augmenter: for augmentation in audio models
        :return:
            data (list of Torch.Tensor) size 1 x T
        """
        if type == "text":
            if not target:
                lengths = torch.LongTensor([x.size(0) for x in data])
                # per-sequence position ids, restarting at 0 for each sentence
                positions = [torch.arange(length_) for length_ in lengths]
                positions = torch.cat(positions)
                # the last part is padded (so that the actual batch size divides by the multiplier
                # tensor_length = math.ceil(sum(lengths) / self.length_mutliplier) * self.length_mutliplier
                tensor_length = torch.sum(lengths).item()
                # create a placeholder for the data
                tensor = data[0].new(tensor_length).fill_(onmt.constants.PAD)
                offset = 0
                # copy the sequences back-to-back into the flat tensor
                for sample in data:
                    current_length = sample.size(0)
                    tensor.narrow(0, offset, current_length).copy_(sample)
                    offset += current_length
                tensor = tensor.unsqueeze(1)  # batch size is 1
                return tensor, positions, lengths
            else:
                # because we take the last unit away
                lengths = torch.LongTensor([x.size(0) - 1 for x in data])
                positions = [torch.arange(length_) for length_ in lengths]
                positions = torch.cat(positions)
                tensor_length = torch.sum(lengths).item()
                # create a placeholder for the data
                input = data[0].new(tensor_length).fill_(onmt.constants.PAD)
                # create a placeholder for the data
                target = data[0].new(tensor_length).fill_(onmt.constants.PAD)
                offset = 0
                # teacher forcing: input takes sample[:-1], output takes sample[1:]
                for sample in data:
                    current_length = sample.size(0) - 1
                    input.narrow(0, offset, current_length).copy_(sample[:-1])
                    target.narrow(0, offset, current_length).copy_(sample[1:])
                    offset += current_length
                input = input.unsqueeze(1)
                target = target.unsqueeze(1)
                return input, target, positions, lengths
        elif type == "audio":
            raise NotImplementedError
            #
            # # First step: on-the-fly processing for the samples
            # # Reshaping: either downsampling or upsampling
            # # On the fly augmentation
            # samples = []
            #
            # for i in range(len(data)):
            #     sample = data[i]
            #
            #     if augmenter is not None:
            #         sample = augmenter.augment(sample)
            #
            #     if self.upsampling:
            #         sample = sample.view(-1, self.feature_size)
            #
            #     samples.append(sample)
            #
            # # compute the lengths afte on-the-fly processing
            # lengths = [x.size(0) for x in samples]
            #
            # max_length = max(lengths)
            #
            # # allocate data for the batch speech
            # feature_size = samples[0].size(1)
            # batch_size = len(data)
            #
            # # feature size + 1 because the last dimension is created for padding
            # tensor = data[0].float().new(batch_size, max_length, feature_size + 1).fill_(onmt.constants.PAD)
            #
            # for i in range(len(samples)):
            #     sample = samples[i]
            #
            #     data_length = sample.size(0)
            #     offset = max_length - data_length if align_right else 0
            #
            #     tensor[i].narrow(0, offset, data_length).narrow(1, 1, sample.size(1)).copy_(sample)
            #     # in padding dimension: 0 is not padded, 1 is padded
            #     tensor[i].narrow(0, offset, data_length).narrow(1, 0, 1).fill_(1)
            #
            # return tensor, None, lengths
        # else:
        #     raise NotImplementedError

    def get(self, name):
        # Return the tensor registered under `name`, or None if absent.
        if name in self.tensors:
            return self.tensors[name]
        else:
            return None

    def cuda(self, fp16=False):
        """
        Send the minibatch data into GPU. Old-fashioned without the 'device' control
        :param fp16:
        :return: None
        """
        for key, tensor in self.tensors.items():
            if isinstance(tensor, dict):
                for k in tensor:
                    v = tensor[k]
                    tensor[k] = v.cuda()
            elif tensor is not None:
                if tensor.type() == "torch.FloatTensor" and fp16:
                    self.tensors[key] = tensor.half()
                self.tensors[key] = self.tensors[key].cuda()
            else:
                continue
class StreamDataset(torch.utils.data.Dataset):
    """Dataset that groups consecutive sentences of documents into streams.

    Samples are first partitioned into *streams* (documents, delimited by a
    blank source line or an <s></s>-only target); within a stream they are
    grouped into mini-batches respecting ``batch_size_words`` and
    ``batch_size_sents``.  Iteration via next() walks stream by stream so
    document context is preserved.
    """

    def __init__(self, src_data, tgt_data,
                 src_langs=None, tgt_langs=None,
                 batch_size_words=2048,
                 data_type="text", batch_size_sents=128,
                 multiplier=1, cleaning=False,
                 augment=False, debug=False,
                 **kwargs):
        """
        :param src_data: list of source tensors (1D for text, 2/3D for other modalities)
        :param tgt_data: list of target tensors (already padded with <s> and </s>)
        :param src_langs: source language ids (list of one-element tensors)
        :param tgt_langs: target language ids (list of one-element tensors)
        :param batch_size_words: max number of words in a mini-batch
        :param data_type: 'text' or 'audio'
        :param batch_size_sents: max number of sequences in a mini-batch
        :param multiplier: the batch size must divide by this (fp16: 4 or 8)
        :param cleaning: drop pairs exceeding max_src_len / max_tgt_len
        :param augment: speech augmentation (only spec augment implemented)
        """
        self.src = src_data
        self._type = data_type
        self.upsampling = kwargs.get('upsampling', False)
        self.debug = debug
        if tgt_data:
            self.tgt = tgt_data
            if src_data:
                assert (len(self.src) == len(self.tgt))
        else:
            self.tgt = None
        self.max_src_len = kwargs.get('max_src_len', None)
        self.max_tgt_len = kwargs.get('max_tgt_len', 128)
        if self.max_src_len is None:
            # audio frames are cheaper than text tokens: allow longer inputs
            self.max_src_len = 128 if self._type == 'text' else 1024
        # Remove the sentence pairs that are too long
        removed = set()  # set for O(1) membership below
        if cleaning:
            cleaned_src = []
            cleaned_tgt = []
            for i, (src_tensor, tgt_tensor) in enumerate(zip(self.src, self.tgt)):
                if src_tensor.size(0) < self.max_src_len and tgt_tensor.size(0) < self.max_tgt_len:
                    cleaned_src.append(src_tensor)
                    cleaned_tgt.append(tgt_tensor)
                else:
                    removed.add(i)
            self.src = cleaned_src
            self.tgt = cleaned_tgt
            print("Removed %d sentences that are too long. " % len(removed))
        # in stream dataset we don't sort data
        self.src_langs = src_langs
        self.tgt_langs = tgt_langs
        if self.src_langs is not None and self.tgt_langs is not None:
            assert (len(src_langs) == len(tgt_langs))
            if cleaning:
                n_samples = len(src_langs)
                # BUGFIX: this used `for i in range(n_samples) and i not in
                # n_removes`, which is a boolean expression (not a filtered
                # comprehension) and crashed at runtime.
                if len(self.src_langs) > 1:
                    self.src_langs = [self.src_langs[i] for i in range(n_samples) if i not in removed]
                if len(self.tgt_langs) > 1:
                    self.tgt_langs = [self.tgt_langs[i] for i in range(n_samples) if i not in removed]
        # In the "bilingual" case, src_langs holds one single vector which is
        # broadcast over the whole batch.
        if src_langs is None or len(src_langs) <= 1:
            self.bilingual = True
        else:
            self.bilingual = False
        self.fullSize = len(self.src) if self.src is not None else len(self.tgt)
        # maximum number of tokens in a mini-batch
        self.batch_size_words = batch_size_words
        # maximum number of sequences in a mini-batch
        self.batch_size_sents = batch_size_sents
        # the actual batch size must divide by this multiplier (for fp16: 4 or 8)
        self.multiplier = multiplier
        # by default: count the amount of padding when we group mini-batches
        self.pad_count = False
        # group samples into mini-batches
        self.streams = []
        self.num_batches = 0
        self.n_streams = 0
        self.allocate_batch()
        # iterator state
        self.current_stream_index = 0
        self.in_stream_index = 0
        self.stream_order = None
        self.augmenter = Augmenter() if augment else None

    def size(self):
        """Total number of samples (after cleaning)."""
        return self.fullSize

    def switchout(self, batch):
        # switchout is applied per-batch elsewhere; nothing to do here
        pass

    def allocate_batch(self):
        """Group sample indices into batches and batches into streams.

        A new stream starts at a document boundary, signalled by an empty
        source line or a target containing only <s></s> (tgt_size == 2).
        """
        cur_stream = []
        cur_batch = []
        cur_batch_size = 0

        def oversize_(batch_, sent_size):
            # Would adding a sentence of `sent_size` tokens overflow the batch?
            if len(batch_) == 0:
                return False
            if len(batch_) >= self.batch_size_sents:
                return True
            if cur_batch_size + sent_size > self.batch_size_words:
                return True
            return False

        i = 0
        while i < self.fullSize:
            src_size = self.src[i].size(0) if self.src is not None else 0
            tgt_size = self.tgt[i].size(0) if self.tgt is not None else 0
            if self.debug:
                print(i, src_size, tgt_size)
            # -1: the target BOS token is input-only, not predicted
            if self.tgt is not None and self.src is not None:
                sentence_length = self.tgt[i].size(0) + self.src[i].size(0) - 1
            elif self.tgt is not None:
                sentence_length = self.tgt[i].size(0) - 1
            else:
                sentence_length = self.src[i].size(0)
            # first sentence of a document or a blank line: flush batch+stream
            if i == 0 or src_size == 0 or tgt_size == 2:
                if len(cur_batch) > 0:
                    if self.debug:
                        print("Created a batch: ", cur_batch)
                    cur_stream.append(cur_batch)
                if len(cur_stream) > 0:
                    self.streams.append(cur_stream)
                cur_stream = []
                cur_batch = []
                cur_batch_size = 0
                if src_size == 0 or tgt_size == 2:  # blank line, move on
                    i = i + 1
                    continue
            # if the current item would overflow the batch, start a new one
            if oversize_(cur_batch, sentence_length):
                cur_stream.append(cur_batch)
                if self.debug:
                    print("Created a batch: ", cur_batch)
                cur_batch = []
                cur_batch_size = 0
            cur_batch.append(i)
            cur_batch_size += sentence_length
            i = i + 1
        # catch the last batch and the last stream
        if len(cur_batch) > 0:
            cur_stream.append(cur_batch)
        if len(cur_stream) > 0:
            self.streams.append(cur_stream)
        self.num_batches = sum([len(stream) for stream in self.streams])
        self.n_streams = len(self.streams)
        print("* Total %d streams collected." % self.n_streams)

    def __len__(self):
        # NOTE: the original defined __len__ twice with identical bodies;
        # the duplicate was removed.
        return self.num_batches

    def __getitem__(self, index):
        """
        :param index: a pair (stream_id, batch_id)
        :return: Stream mini-batch
        """
        stream_id, batch_id = index
        assert stream_id < self.n_streams, "%d > %d" % (stream_id, self.n_streams)
        n_batches = len(self.streams[stream_id])
        assert batch_id < n_batches, "%d > %d" % (batch_id, n_batches)
        # access the batch
        batch_ids = self.streams[stream_id][batch_id]
        src_data = [self.src[i] for i in batch_ids] if self.src else None
        tgt_data = [self.tgt[i] for i in batch_ids] if self.tgt else None
        src_lang_data = None
        tgt_lang_data = None
        if self.bilingual:
            if self.src_langs is not None:
                src_lang_data = [self.src_langs[0]]  # single broadcast tensor
            if self.tgt_langs is not None:
                tgt_lang_data = [self.tgt_langs[0]]
        else:
            if self.src_langs is not None:
                src_lang_data = [self.src_langs[i] for i in batch_ids]
            if self.tgt_langs is not None:
                tgt_lang_data = [self.tgt_langs[i] for i in batch_ids]
        return Stream(src_data, tgt_data=tgt_data,
                      src_lang_data=src_lang_data, tgt_lang_data=tgt_lang_data,
                      src_type=self._type,
                      augmenter=self.augmenter, upsampling=self.upsampling)

    def create_order(self, random=True):
        """Reset the iterator and generate a (possibly shuffled) stream order."""
        self.current_stream_index = 0
        self.in_stream_index = 0
        if random:
            self.stream_order = torch.randperm(len(self.streams))
        else:
            self.stream_order = torch.arange(len(self.streams)).long()
        return self.stream_order

    def next(self, curriculum=False, reset=True, split_sizes=1):
        """Return the next batch (wrapped in a list), advancing the iterator.

        Requires create_order() to have been called first.
        """
        # reset the iterator when the data size limit is reached
        if self.current_stream_index >= self.n_streams:
            if reset:
                self.current_stream_index = 0
                self.in_stream_index = 0
            else:
                return None
        current_stream_size = len(self.streams[self.stream_order[self.current_stream_index]])
        batch_index = [self.stream_order[self.current_stream_index], self.in_stream_index]
        batch = self[batch_index]
        # move the iterator one step
        self.in_stream_index += 1
        # if the current stream runs out of batches: move to the next stream
        if self.in_stream_index >= current_stream_size:
            self.current_stream_index += 1
            self.in_stream_index = 0
        return [batch]

    def is_new_stream(self):
        # 1 because this is called right after batch "0" of a stream was served
        return self.in_stream_index == 1

    def shuffle(self):
        """Jointly shuffle source and target (NOTE: breaks document order)."""
        data = list(zip(self.src, self.tgt))
        self.src, self.tgt = zip(*[data[i] for i in torch.randperm(len(data))])

    def set_index(self, iteration):
        print("This jumping is not implemented for stream dataset. Use -reset_optim instead to start from beginning")
        raise NotImplementedError
# --- onmt/data/whisper_audio.py ---
import os
from functools import lru_cache
from typing import Optional, Union
import ffmpeg
import numpy as np
import torch
import torch.nn.functional as F
from .utils import exact_div
# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
N_FFT = 400  # 25 ms analysis window at 16 kHz
N_MELS = 80
HOP_LENGTH = 160  # 10 ms hop at 16 kHz
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE  # 480000 samples in a 30-second chunk
N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH)  # 3000 frames in a mel spectrogram input
N_SAMPLES_PER_TOKEN = HOP_LENGTH * 2  # the initial convolutions has stride 2
FRAMES_PER_SECOND = exact_div(SAMPLE_RATE, HOP_LENGTH)  # 10ms per audio frame
TOKENS_PER_SECOND = exact_div(SAMPLE_RATE, N_SAMPLES_PER_TOKEN)  # 20ms per audio token
def load_audio(file: str, sr: int = SAMPLE_RATE):
    """Decode an audio file to a mono float32 waveform, resampling to *sr*.

    Launches an ffmpeg subprocess (requires the ffmpeg CLI and the
    `ffmpeg-python` package) that down-mixes to one channel and resamples.

    Parameters
    ----------
    file: str
        The audio file to open
    sr: int
        The sample rate to resample the audio if necessary

    Returns
    -------
    A NumPy array containing the audio waveform, in float32 dtype.
    """
    try:
        stream = ffmpeg.input(file, threads=0)
        stream = stream.output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sr)
        out, _ = stream.run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
    except ffmpeg.Error as e:
        raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
    # 16-bit signed PCM -> float32 in [-1, 1)
    pcm = np.frombuffer(out, np.int16).flatten()
    return pcm.astype(np.float32) / 32768.0
def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
    """Zero-pad or truncate *array* along *axis* to exactly *length* elements.

    Works for both torch tensors and numpy arrays; used to normalise audio
    to the encoder's expected input size (N_SAMPLES by default).
    """
    current = array.shape[axis]
    if torch.is_tensor(array):
        if current > length:
            keep = torch.arange(length, device=array.device)
            array = array.index_select(dim=axis, index=keep)
        elif current < length:
            widths = [(0, 0)] * array.ndim
            widths[axis] = (0, length - current)
            # F.pad expects pad amounts ordered from the last dimension backwards
            array = F.pad(array, [amount for pair in widths[::-1] for amount in pair])
    else:
        if current > length:
            array = array.take(indices=range(length), axis=axis)
        elif current < length:
            widths = [(0, 0)] * array.ndim
            widths[axis] = (0, length - current)
            array = np.pad(array, widths)
    return array
@lru_cache(maxsize=None)
def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor:
    """Load the mel filterbank matrix projecting an STFT to a Mel spectrogram.

    The matrix is stored as a compressed npz asset so that librosa is not a
    runtime dependency; it was saved with:

        np.savez_compressed(
            "mel_filters.npz",
            mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
        )
    """
    assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
    filters_path = os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")
    with np.load(filters_path) as archive:
        return torch.from_numpy(archive[f"mel_{n_mels}"]).to(device)
def log_mel_spectrogram(
    audio: Union[str, np.ndarray, torch.Tensor],
    n_mels: int = N_MELS,
    padding: int = 0,
    device: Optional[Union[str, torch.device]] = None,
):
    """Compute the log-Mel spectrogram of an audio waveform.

    Parameters
    ----------
    audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
        Path to an audio file, or a NumPy array / Tensor with a 16 kHz waveform
    n_mels: int
        Number of Mel-frequency filters; only 80 is supported
    padding: int
        Number of zero samples to pad to the right
    device: Optional[Union[str, torch.device]]
        If given, the audio tensor is moved to this device before the STFT

    Returns
    -------
    torch.Tensor, shape = (80, n_frames)
        A Tensor that contains the Mel spectrogram
    """
    # Accept a path, numpy array, or tensor; normalise to a torch tensor.
    if not torch.is_tensor(audio):
        if isinstance(audio, str):
            audio = load_audio(audio)
        audio = torch.from_numpy(audio)
    if device is not None:
        audio = audio.to(device)
    if padding > 0:
        audio = F.pad(audio, (0, padding))
    window = torch.hann_window(N_FFT).to(audio.device)
    stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
    # drop the final frame and take the power spectrum
    magnitudes = stft[..., :-1].abs() ** 2
    mel_spec = mel_filters(audio.device, n_mels) @ magnitudes
    log_spec = torch.clamp(mel_spec, min=1e-10).log10()
    # dynamic-range compression: clip to 8 dB below the peak, rescale roughly to [0, 1]
    log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
    return (log_spec + 4.0) / 4.0
# --- onmt/data/stream_dataset.py ---
from __future__ import division
import math
import torch
import torch.utils.data
from collections import defaultdict
import onmt
from onmt.speech.Augmenter import Augmenter
from onmt.modules.dropout import switchout
"""
Data management for stream-to-stream models
Two basic classes:
- Batch stores the input / output sequences, grouped into tensors with the same length (by padding)
- Dataset stores all of the data and
"""
class Stream(object):
    """One mini-batch of a document stream.

    Unlike a regular padded batch, the sequences are concatenated
    back-to-back along the time axis into a single tensor of batch size 1
    (see collate()).
    """
    # An object to manage the data within a stream
    def __init__(self, src_data, tgt_data=None,
                 src_lang_data=None, tgt_lang_data=None,
                 src_type='text',
                 length_multiplier=1,
                 augmenter=None, upsampling=False,
                 **kwargs):
        """
        :param src_data: list of source tensors
        :param tgt_data: list of target tensors
        :param src_lang_data: list of language features for the source (TB finished)
        :param tgt_lang_data: list of language features for the target (TB finished)
        :param src_type: text or audio
        :param reshape_speech: the number of frames to be reshaped
        :param augmenter: using augmentation for speech
        :param merge: if the two sequences are going to be merged for Relative Transformer
        """
        # missing keys default to None, so get() never raises
        self.tensors = defaultdict(lambda: None)
        self.has_target = False
        self.src_type = src_type
        # self.upsampling = upsampling
        # self.feature_size = kwargs.get('feature_size', 40)
        self.length_mutliplier = length_multiplier
        if src_data is not None:
            self.tensors['source'], self.tensors['source_pos'], self.src_lengths = \
                self.collate(src_data,
                             type=self.src_type,
                             augmenter=augmenter)
            self.tensors['src_length'] = self.src_lengths
            self.src_size = sum(self.src_lengths)
        else:
            self.src_size = 0
        if tgt_data is not None:
            # NOTE(review): the results of this first collate() call are
            # immediately superseded by the target=True call below, so it
            # appears to be wasted work -- confirm it can be dropped.
            target_full, target_pos, self.tgt_lengths = self.collate(tgt_data)
            # self.tensors['target'] = target_full
            # self.tensors['target_input'] = target_full[:-1]
            # the last sentence has one element (eos) missing
            # self.tgt_lengths[-1] = self.tgt_lengths[-1] - 1
            # self.tensors['target_output'] = target_full[1:]
            # self.tensors['target_pos'] = target_pos[:-1]
            self.tensors['target_input'], self.tensors['target_output'], \
            self.tensors['target_pos'], self.tgt_lengths = self.collate(tgt_data, target=True)
            self.tensors['tgt_mask'] = self.tensors['target_output'].ne(onmt.constants.PAD)
            self.has_target = True
            # -1 per sentence: BOS is input-only, not a predicted token
            self.tgt_size = sum([len(x) - 1 for x in tgt_data])
        else:
            self.tgt_size = 0
        self.size = len(src_data) if src_data is not None else len(tgt_data)
        if src_lang_data is not None:
            self.tensors['source_lang'] = torch.cat(src_lang_data).long()
        if tgt_lang_data is not None:
            self.tensors['target_lang'] = torch.cat(tgt_lang_data).long()

    def switchout(self, swrate, src_vocab_size, tgt_vocab_size):
        # Switch out function ... currently works with only source text data
        # NOTE(review): self.tensors['target'] is never populated in __init__
        # (the assignment is commented out there), so the has_target branch
        # below would call switchout(None, ...) -- verify before enabling.
        if self.src_type == 'text':
            self.tensors['source'] = switchout(self.tensors['source'], src_vocab_size, swrate, transpose=True)
            if self.has_target:
                self.tensors['target'] = switchout(self.tensors['target'], tgt_vocab_size, swrate, transpose=True, offset=1)
                target_full = self.tensors['target']
                self.tensors['target_input'] = target_full[:-1]
                self.tensors['target_output'] = target_full[1:]
                self.tensors['tgt_mask'] = self.tensors['target_output'].ne(onmt.constants.PAD)

    # down sampling the speech signal by simply concatenating n features (reshaping)
    def downsample(self, data):
        # NOTE(review): self.reshape_speech is never assigned in __init__
        # (only a commented-out line exists), so calling this would raise
        # AttributeError; looks like dead code inherited from Dataset.
        if self.reshape_speech == 0:
            return data
        else:
            concat = self.reshape_speech
            tensor_ = data.float()  # adding float because of fp16 data storage
            add = (concat - tensor_.size()[0] % concat) % concat
            z = torch.FloatTensor(add, tensor_.size()[1]).zero_()
            # adding an additional dimension as padding
            tensor_ = torch.cat((tensor_, z), 0)
            tensor_ = tensor_.reshape((int(tensor_.size()[0] / concat), tensor_.size()[1] * concat))
            return tensor_

    def augment_speech(self):
        # placeholder: spec-augment is not implemented for streams
        return

    def collate(self, data, type="text", augmenter=None, target=False):
        """
        Assembling the individual sequences into one single tensor, included padding
        :param target:
        :param data: the list of sequences in chronological order
        :param type: text or audio
        :param augmenter: for augmentation in audio models
        :return:
            data (list of Torch.Tensor) size 1 x T
        """
        if type == "text":
            if not target:
                lengths = torch.LongTensor([x.size(0) for x in data])
                # per-sequence position ids, restarting at 0 for each sentence
                positions = [torch.arange(length_) for length_ in lengths]
                positions = torch.cat(positions)
                # the last part is padded (so that the actual batch size divides by the multiplier
                # tensor_length = math.ceil(sum(lengths) / self.length_mutliplier) * self.length_mutliplier
                tensor_length = torch.sum(lengths).item()
                # create a placeholder for the data
                tensor = data[0].new(tensor_length).fill_(onmt.constants.PAD)
                offset = 0
                # copy the sequences back-to-back into the flat tensor
                for sample in data:
                    current_length = sample.size(0)
                    tensor.narrow(0, offset, current_length).copy_(sample)
                    offset += current_length
                tensor = tensor.unsqueeze(1)  # batch size is 1
                return tensor, positions, lengths
            else:
                # because we take the last unit away
                lengths = torch.LongTensor([x.size(0) - 1 for x in data])
                positions = [torch.arange(length_) for length_ in lengths]
                positions = torch.cat(positions)
                tensor_length = torch.sum(lengths).item()
                # create a placeholder for the data
                input = data[0].new(tensor_length).fill_(onmt.constants.PAD)
                # create a placeholder for the data
                target = data[0].new(tensor_length).fill_(onmt.constants.PAD)
                offset = 0
                # teacher forcing: input takes sample[:-1], output takes sample[1:]
                for sample in data:
                    current_length = sample.size(0) - 1
                    input.narrow(0, offset, current_length).copy_(sample[:-1])
                    target.narrow(0, offset, current_length).copy_(sample[1:])
                    offset += current_length
                input = input.unsqueeze(1)
                target = target.unsqueeze(1)
                return input, target, positions, lengths
        elif type == "audio":
            raise NotImplementedError
            #
            # # First step: on-the-fly processing for the samples
            # # Reshaping: either downsampling or upsampling
            # # On the fly augmentation
            # samples = []
            #
            # for i in range(len(data)):
            #     sample = data[i]
            #
            #     if augmenter is not None:
            #         sample = augmenter.augment(sample)
            #
            #     if self.upsampling:
            #         sample = sample.view(-1, self.feature_size)
            #
            #     samples.append(sample)
            #
            # # compute the lengths afte on-the-fly processing
            # lengths = [x.size(0) for x in samples]
            #
            # max_length = max(lengths)
            #
            # # allocate data for the batch speech
            # feature_size = samples[0].size(1)
            # batch_size = len(data)
            #
            # # feature size + 1 because the last dimension is created for padding
            # tensor = data[0].float().new(batch_size, max_length, feature_size + 1).fill_(onmt.constants.PAD)
            #
            # for i in range(len(samples)):
            #     sample = samples[i]
            #
            #     data_length = sample.size(0)
            #     offset = max_length - data_length if align_right else 0
            #
            #     tensor[i].narrow(0, offset, data_length).narrow(1, 1, sample.size(1)).copy_(sample)
            #     # in padding dimension: 0 is not padded, 1 is padded
            #     tensor[i].narrow(0, offset, data_length).narrow(1, 0, 1).fill_(1)
            #
            # return tensor, None, lengths
        # else:
        #     raise NotImplementedError

    def get(self, name):
        # Return the tensor registered under `name`, or None if absent.
        if name in self.tensors:
            return self.tensors[name]
        else:
            return None

    def cuda(self, fp16=False):
        """
        Send the minibatch data into GPU. Old-fashioned without the 'device' control
        :param fp16:
        :return: None
        """
        for key, tensor in self.tensors.items():
            if isinstance(tensor, dict):
                for k in tensor:
                    v = tensor[k]
                    tensor[k] = v.cuda()
            elif tensor is not None:
                if tensor.type() == "torch.FloatTensor" and fp16:
                    self.tensors[key] = tensor.half()
                self.tensors[key] = self.tensors[key].cuda()
            else:
                continue
class StreamDataset(torch.utils.data.Dataset):
    # Dataset variant for "streams": consecutive sentences whose original order
    # must be preserved, so samples are never sorted or shuffled across the corpus.

    def __init__(self, src_data, tgt_data,
                 src_langs=None, tgt_langs=None,
                 batch_size_words=2048,
                 data_type="text", batch_size_sents=128,
                 multiplier=1,
                 augment=False,
                 **kwargs):
        """
        :param src_data: List of tensors for the source side (1D for text, 2 or 3Ds for other modalities)
        :param tgt_data: List of tensors (1D text) for the target side (already padded with <s> and </s>
        :param src_langs: Source languages (list of one-tensors)
        :param tgt_langs: Target Languages (list of one-tensors)
        :param batch_size_words: Maximum number of words in the minibatch (MB can't have more than this)
        :param data_type: Text or Audio
        :param batch_size_sents: Maximum number of sequences in the minibatch (MB can't have more than this)
        :param multiplier: The number of sequences must divide by this number (for fp16 when multiplier=8)
        :param reshape_speech: Put N frames together to reduce the length (this might be done already in preprocessing)
        :param augment: Speech Augmentation (currently only spec augmentation is implemented)
        """
        """
        For alignment, the right-aligned data looks like:
        P P P P D D D D
        P P D D D D D D
        P P P P P D D D
        P P P D D D D D
        This can affect positional encoding (whose implementation is not consistent w.r.t padding)
        For models with absolute positional encoding, src and tgt should be aligned left (This is default)
        For models with relative positional encoding, src should be right and tgt should be left
        """
        self.src = src_data
        self._type = data_type
        self.upsampling = kwargs.get('upsampling', False)
        # self.reshape_speech = reshape_speech
        if tgt_data:
            self.tgt = tgt_data
            if src_data:
                # parallel data: one target sequence per source sequence
                assert (len(self.src) == len(self.tgt))
        else:
            self.tgt = None
        # in stream dataset we don't sort data
        self.src_langs = src_langs
        self.tgt_langs = tgt_langs
        if self.src_langs is not None and self.tgt_langs is not None:
            assert (len(src_langs) == len(tgt_langs))
        # In "bilingual" case, the src_langs only contains one single vector
        # Which is broadcasted to batch_size
        # NOTE(review): this len() call assumes src_langs is never None — confirm callers
        if len(src_langs) <= 1:
            self.bilingual = True
        else:
            self.bilingual = False
        self.fullSize = len(self.src) if self.src is not None else len(self.tgt)
        # maximum number of tokens in a mb
        self.batch_size_words = batch_size_words
        # maximum sequences in a mb
        self.batch_size_sents = batch_size_sents
        # the actual batch size must divide by this multiplier (for fp16 it has to be 4 or 8)
        self.multiplier = multiplier
        # by default: count the amount of padding when we group mini-batches
        self.pad_count = False
        # group samples into mini-batches
        self.batches = []
        self.num_batches = 0
        self.allocate_batch()
        self.cur_index = 0
        self.batchOrder = None
        if augment:
            self.augmenter = Augmenter()
        else:
            self.augmenter = None

    def size(self):
        # total number of samples (NOT the number of batches)
        return self.fullSize

    def switchout(self, batch):
        # intentionally a no-op: kept for interface parity with the regular Dataset
        pass

    # This function allocates the mini-batches (grouping sentences with the same size)
    def allocate_batch(self):
        cur_batch = []          # sample indices of the batch being built
        cur_batch_size = 0      # running token count of the open batch
        cur_batch_sizes = []    # per-sample token counts of the open batch

        def oversize_(cur_batch, sent_size):
            # Would adding a sample of sent_size tokens overflow the open batch?
            # (reads cur_batch_size from the enclosing scope)
            if len(cur_batch) == 0:
                return False
            if len(cur_batch) >= self.batch_size_sents:
                return True
            if cur_batch_size + sent_size > self.batch_size_words:
                return True
            return False

        i = 0
        while i < self.fullSize:
            if self.tgt is not None and self.src is not None:
                # -1: input/output views of the target overlap by one token
                sentence_length = self.tgt[i].size(0) + self.src[i].size(0) - 1
            elif self.tgt is not None:
                sentence_length = self.tgt[i].size(0) - 1
            else:
                sentence_length = self.src[i].size(0)

            oversized = oversize_(cur_batch, sentence_length)
            # if the current item makes the batch exceed max size
            # then we create a new batch
            if oversized:
                # cut-off the current list to fit the multiplier
                current_size = len(cur_batch)
                scaled_size = max(
                    self.multiplier * (current_size // self.multiplier),
                    current_size % self.multiplier)
                batch_ = cur_batch[:scaled_size]
                self.batches.append(batch_)  # add this batch into the batch list
                cur_batch = cur_batch[scaled_size:]  # reset the current batch
                cur_batch_sizes = cur_batch_sizes[scaled_size:]
                cur_batch_size = sum(cur_batch_sizes)

            cur_batch.append(i)
            cur_batch_size += sentence_length
            cur_batch_sizes.append(sentence_length)

            i = i + 1

        # catch the last batch
        if len(cur_batch) > 0:
            self.batches.append(cur_batch)
        self.num_batches = len(self.batches)

    def __len__(self):
        # number of mini-batches
        return self.num_batches

    def __getitem__(self, index):
        """
        :param index: the index of the mini-batch in the list
        :return: Batch
        """
        assert index < self.num_batches, "%d > %d" % (index, self.num_batches)
        batch_ids = self.batches[index]
        if self.src:
            src_data = [self.src[i] for i in batch_ids]
        else:
            src_data = None
        if self.tgt:
            tgt_data = [self.tgt[i] for i in batch_ids]
        else:
            tgt_data = None

        src_lang_data = None
        tgt_lang_data = None

        if self.bilingual:
            if self.src_langs is not None:
                src_lang_data = [self.src_langs[0]]  # should be a tensor [0]
            if self.tgt_langs is not None:
                tgt_lang_data = [self.tgt_langs[0]]  # should be a tensor [1]
        else:
            if self.src_langs is not None:
                src_lang_data = [self.src_langs[i] for i in batch_ids]
            if self.tgt_langs is not None:
                tgt_lang_data = [self.tgt_langs[i] for i in batch_ids]

        batch = Stream(src_data, tgt_data=tgt_data,
                       src_lang_data=src_lang_data, tgt_lang_data=tgt_lang_data,
                       src_type=self._type,
                       augmenter=self.augmenter, upsampling=self.upsampling)
        return batch

    # NOTE(review): duplicate definition — __len__ is already defined above;
    # this second, identical definition silently shadows the first and is dead weight.
    def __len__(self):
        return self.num_batches

    # genereate a new batch - order (static)
    def create_order(self, random=True):
        # always generate in order of the data (the 'random' flag is ignored here
        # on purpose: stream data must stay sequential)
        self.batchOrder = torch.arange(self.num_batches).long()
        self.cur_index = 0
        return self.batchOrder

    # return the next batch according to the iterator
    def next(self, curriculum=False, reset=True, split_sizes=1):
        # reset iterator if reach data size limit
        if self.cur_index >= self.num_batches:
            if reset:
                self.cur_index = 0
            else:
                return None
        if curriculum or self.batchOrder is None:
            batch_index = self.cur_index
        else:
            batch_index = self.batchOrder[self.cur_index]
        batch = self[batch_index]
        # move the iterator one step
        self.cur_index += 1
        return [batch]

    def shuffle(self):
        # joint shuffle of (src, tgt) pairs so the alignment between sides is kept
        data = list(zip(self.src, self.tgt))
        self.src, self.tgt = zip(*[data[i] for i in torch.randperm(len(data))])

    def set_index(self, iteration):
        # jump the iterator to a given batch index (e.g. when resuming training)
        assert (0 <= iteration < self.num_batches)
        self.cur_index = iteration
NMTGMinor | NMTGMinor-master/onmt/data/dataset.py | from __future__ import division
import math
import torch
import torch.utils.data
from collections import defaultdict
import onmt
from onmt.speech.Augmenter import Augmenter
from onmt.modules.dropout import switchout
import numpy as np
from .batch_utils import allocate_batch, allocate_batch_unbalanced
import dill
"""
Data management for sequence-to-sequence models
Two basic classes:
- Batch stores the input / output sequences, grouped into tensors with the same length (by padding)
- Dataset stores all of the data and
"""
def merge_data(data, align_right=False, type='text', augmenter=None, upsampling=False,
               feature_size=40, dataname="source", src_pad=1, tgt_pad=1):
    """
    Pad the individual sequences in *data* into one batch tensor.

    :param data: list of per-sample tensors (1D for text, 2D for speech features)
    :param align_right: pad on the left (True) or on the right (False, default)
    :param type: 'text', 'audio'/'scp', or 'wav'
    :param augmenter: optional on-the-fly augmenter for speech samples
    :param upsampling: reshape speech samples to (-1, feature_size) before padding
    :param feature_size: feature dimension used by the upsampling reshape
    :param dataname: 'source' or 'target' — selects which pad id to fill with
    :param src_pad: pad id used when dataname == 'source'
    :param tgt_pad: pad id used when dataname == 'target'
    :return: (batch tensor, positions (always None here), list of lengths)
    """
    if type == "text":
        lengths = [seq.size(0) for seq in data]
        max_length = max(lengths)
        if dataname == "source":
            fill_value = src_pad
        elif dataname == "target":
            fill_value = tgt_pad
        else:
            print("Warning: check the dataname")
            raise NotImplementedError
        # [batch x max_len], initialised with the pad id
        tensor = data[0].new(len(data), max_length).fill_(fill_value)
        pos = None
        for row, seq in enumerate(data):
            seq_len = seq.size(0)
            start = max_length - seq_len if align_right else 0
            tensor[row].narrow(0, start, seq_len).copy_(seq)
        return tensor, pos, lengths

    elif type in ["audio", "scp"]:
        # On-the-fly processing (augmentation / reshape) before padding.
        samples = []
        for sample in data:
            if augmenter is not None:
                sample = augmenter.augment(sample)
            if upsampling:
                sample = sample.view(-1, feature_size)
            samples.append(sample)

        # lengths are computed AFTER the on-the-fly processing
        lengths = [s.size(0) for s in samples]
        max_length = max(lengths)
        feature_size = samples[0].size(1)
        batch_size = len(data)

        # feature_size + 1: channel 0 carries the not-padded flag (1 = real frame)
        tensor = data[0].float().new(batch_size, max_length, feature_size + 1).fill_(0)
        for row, sample in enumerate(samples):
            seq_len = sample.size(0)
            start = max_length - seq_len if align_right else 0
            tensor[row].narrow(0, start, seq_len).narrow(1, 1, sample.size(1)).copy_(sample)
            tensor[row].narrow(0, start, seq_len).narrow(1, 0, 1).fill_(1)
        return tensor, None, lengths

    elif type == 'wav':
        lengths = [w.size(0) for w in data]
        max_length = max(lengths)
        feature_size = 1  # raw waveform: a single channel per time step
        assert feature_size == 1, "expecting feature size = 1 but get %2.f" % feature_size
        batch_size = len(data)

        # feature_size + 1: channel 0 carries the not-padded flag (1 = real frame)
        tensor = data[0].float().new(batch_size, max_length, feature_size + 1).fill_(0)
        for row, sample in enumerate(data):
            seq_len = sample.size(0)
            start = max_length - seq_len if align_right else 0
            channels = 1
            tensor[row].narrow(0, start, seq_len).narrow(1, 1, channels).copy_(sample)
            tensor[row].narrow(0, start, seq_len).narrow(1, 0, 1).fill_(1)
        return tensor, None, lengths

    else:
        raise NotImplementedError
def collate_fn(src_data, tgt_data,
               src_lang_data, tgt_lang_data,
               src_atbs_data, tgt_atbs_data,
               src_align_right, tgt_align_right,
               src_type='text',
               augmenter=None, upsampling=False,
               bilingual=False, vocab_mask=None,
               past_src_data=None, src_pad="<blank>", tgt_pad="<blank>", feature_size=40):
    """
    Assemble one minibatch dict from lists of per-sample tensors and wrap it
    in a pickle-friendly LightBatch.

    NOTE(review): the string defaults for src_pad/tgt_pad would crash the
    fill_()/eq() calls below; callers appear to always pass integer pad ids —
    confirm before relying on the defaults.
    """
    tensors = dict()

    if src_data is not None:
        source, source_pos, src_lengths = merge_data(
            src_data, align_right=src_align_right, type=src_type,
            augmenter=augmenter, upsampling=upsampling,
            feature_size=feature_size, dataname="source", src_pad=src_pad)
        tensors['src_type'] = src_type
        # the mask is computed in [B x T] layout, before transposing to [T x B]
        tensors['src_selfattn_mask'] = source.eq(src_pad)
        tensors['source'] = source.transpose(0, 1).contiguous()
        tensors['source_pos'] = source_pos.transpose(0, 1) if source_pos is not None else source_pos
        tensors['src_lengths'] = torch.LongTensor(src_lengths)
        tensors['src_size'] = sum(src_lengths)

    if tgt_data is not None:
        target_full, target_pos, tgt_lengths = merge_data(
            tgt_data, align_right=tgt_align_right, dataname="target", tgt_pad=tgt_pad)
        tensors['tgt_selfattn_mask'] = target_full.eq(tgt_pad)
        target_full = target_full.t().contiguous()  # transpose BxT to TxB
        tensors['target'] = target_full
        tensors['target_input'] = target_full[:-1]   # teacher-forcing input: drop the last token
        tensors['target_input_selfattn_mask'] = tensors['target_input'].transpose(0, 1).eq(tgt_pad)
        tensors['target_output'] = target_full[1:]   # prediction targets: drop the first token
        if target_pos is not None:
            tensors['target_pos'] = target_pos.t().contiguous()[:-1]
        tgt_size = sum(len(x) - 1 for x in tgt_data)
        tensors['tgt_lengths'] = tgt_lengths
    else:
        tgt_size = 0
        tensors['tgt_lengths'] = None

    # merge data for the previous (context) source, if any
    if past_src_data is not None:
        past_source, past_source_pos, past_src_lengths = merge_data(
            past_src_data, align_right=src_align_right, type=src_type,
            augmenter=augmenter, upsampling=upsampling,
            feature_size=feature_size, dataname="source", src_pad=src_pad)
        tensors['past_source'] = past_source.transpose(0, 1).contiguous()
        tensors['past_source_pos'] = past_source_pos.transpose(0, 1) if past_source_pos is not None \
            else past_source_pos
        tensors['past_src_lengths'] = torch.LongTensor(past_src_lengths)
        tensors['past_src_size'] = sum(past_src_lengths)

    tensors['tgt_size'] = tgt_size
    tensors['size'] = len(src_data) if src_data is not None else len(tgt_data)

    # language / attribute ids: each concatenated into a single 1D LongTensor
    if src_lang_data is not None:
        tensors['source_lang'] = torch.cat(src_lang_data).long()
    if tgt_lang_data is not None:
        tensors['target_lang'] = torch.cat(tgt_lang_data).long()
    if src_atbs_data is not None:
        tensors['source_atbs'] = torch.cat(src_atbs_data).long()
    if tgt_atbs_data is not None:
        tensors['target_atbs'] = torch.cat(tgt_atbs_data).long()

    tensors['vocab_mask'] = vocab_mask
    return LightBatch(tensors)
def rewrap(light_batch):
    """
    Promote a LightBatch to a full Batch.

    The LightBatch form exists only to avoid pickling errors during data
    collection; once back in the main process it is converted with this helper.

    :param light_batch: LightBatch holding the assembled tensor dict
    :return: Batch built from the same tensors
    """
    tensors = light_batch.tensors
    return Batch(tensors)
class Batch(object):
    """An object to manage the data within a minibatch.

    Wraps the tensor dict produced by collate_fn; unknown keys resolve to None
    (via a defaultdict) instead of raising.
    """

    def __init__(self, tensors):
        """
        :param tensors: dict of batch tensors; must contain 'src_size',
            'tgt_size', 'size', 'src_lengths', 'tgt_lengths' and 'vocab_mask'.
        """
        self.tensors = defaultdict(lambda: None, tensors)
        self.src_size = tensors['src_size']
        self.tgt_size = tensors['tgt_size']
        self.size = tensors['size']
        self.src_lengths = tensors['src_lengths']
        self.tgt_lengths = tensors['tgt_lengths']
        self.has_target = self.tensors['target'] is not None
        self.vocab_mask = tensors['vocab_mask']

    def get(self, name):
        """Return the tensor stored under *name*, or None when absent."""
        if name in self.tensors:
            return self.tensors[name]
        else:
            return None

    def cuda(self, fp16=False, device=None):
        """
        Send the minibatch data into GPU.
        :param device: default = None (default CUDA device)
        :param fp16: convert float tensors to half precision before transfer
        :return: None
        """
        for key, tensor in self.tensors.items():
            if isinstance(tensor, dict):
                for k in tensor:
                    v = tensor[k]
                    # BUGFIX: test the VALUE for being a tensor, not the key.
                    # Previously `isinstance(k, torch.Tensor)` was checked, and since
                    # keys are strings the nested tensors were never moved to the GPU
                    # (compare LightBatch.pin_memory, which checks the value).
                    if isinstance(v, torch.Tensor):
                        tensor[k] = v.cuda(device=device)
            elif tensor is not None:
                if isinstance(tensor, torch.Tensor):
                    if tensor.type() == "torch.FloatTensor" and fp16:
                        self.tensors[key] = tensor.half()
                    self.tensors[key] = self.tensors[key].cuda(device=device)
            else:
                continue

    def switchout(self, swrate, src_vocab_size, tgt_vocab_size):
        # Switch out function ... currently works with only source text data
        # (a 2D source means text ids; higher-rank speech features are left alone)
        if len(self.tensors['source'].shape) == 2:
            self.tensors['source'] = switchout(self.tensors['source'], src_vocab_size, swrate, transpose=True)
        if self.has_target:
            self.tensors['target'] = switchout(self.tensors['target'], tgt_vocab_size, swrate, transpose=True, offset=1)

    # Masked Predictive Coding mask
    # Randomly choose positions and set features to Zero
    # For later reconstruction
    def mask_mpc(self, p=0.5):
        """Zero out random frames of the speech source (in place) for MPC training.

        :param p: drop probability per (time, batch) position
        """
        # the audio has size [T x B x (F+1)]; the FIRST feature channel is padding
        source = self.tensors['source']
        with torch.no_grad():
            # drop the padding flag channel, keep the real features
            source = source.narrow(2, 1, source.size(2) - 1)
            # p drop -> 1 - p keeping probability
            masked_positions = source.new(source.size(0), source.size(1)).bernoulli_(1 - p)
            self.tensors['original_source'] = source.clone()
            # in-place multiplication that will change the underlying storage
            source.mul_(masked_positions.unsqueeze(-1))
        # remember the positions to be used later in losses
        self.tensors['masked_positions'] = masked_positions
        return
class LightBatch:
    """Thin, pickle-friendly wrapper around a dict of batch tensors.

    Used during data collection (see rewrap); only supports memory pinning.
    """

    def __init__(self, tensors):
        self.tensors = tensors

    def pin_memory(self):
        """
        Pin every tensor in the batch (including those in nested dicts)
        for faster host-to-GPU transfer.
        :return: self, to allow chaining
        """
        for key, value in self.tensors.items():
            if isinstance(value, dict):
                for inner_key, inner_value in value.items():
                    if isinstance(inner_value, torch.Tensor):
                        value[inner_key] = inner_value.pin_memory()
            elif value is None:
                continue
            elif isinstance(value, torch.Tensor):
                self.tensors[key] = value.pin_memory()
        return self
class Dataset(torch.utils.data.Dataset):
    """Map-style dataset that pre-groups samples into token-budgeted mini-batches.

    Indexing (``__getitem__``) returns one sample dict; ``collater`` and
    ``get_batch`` assemble whole mini-batches via :func:`collate_fn`.
    """

    def get_tgt_pad(self):
        # target-side pad id (resolved from constants in __init__)
        return self.tgt_pad

    def get_batches(self):
        # list of mini-batches; each entry is a list of sample indices
        return self.batches

    def get_collater(self):
        # bound collate function to hand to a DataLoader
        return self.collater

    def get_size(self):
        # number of mini-batches (NOT number of samples)
        return self.num_batches

    def __init__(self, src_data, tgt_data,
                 src_sizes=None, tgt_sizes=None,
                 src_langs=None, tgt_langs=None,
                 src_atbs=None, tgt_atbs=None,
                 batch_size_frames=1280000,
                 batch_size_words=16384,
                 data_type="text", batch_size_sents=128,
                 multiplier=1, sorting=False,
                 augment=False,
                 src_align_right=False, tgt_align_right=False,
                 verbose=False, cleaning=False, debug=False,
                 num_split=1,
                 sa_f=8, sa_t=64, input_size=40,
                 past_src_data=None,
                 past_src_data_sizes=None,
                 constants=None,
                 **kwargs):
        """
        :param src_data: List of tensors for the source side (1D for text, 2 or 3Ds for other modalities)
        :param tgt_data: List of tensors (1D text) for the target side (already padded with <s> and </s>
        :param src_langs: Source languages (list of one-tensors)
        :param tgt_langs: Target Languages (list of one-tensors)
        :param batch_size_words: Maximum number of words in the minibatch (MB can't have more than this)
        :param data_type: Text or Audio
        :param batch_size_sents: Maximum number of sequences in the minibatch (MB can't have more than this)
        :param multiplier: The number of sequences must divide by this number (for fp16 when multiplier=8)
        :param reshape_speech: Put N frames together to reduce the length (this might be done already in preprocessing)
        :param augment: Speech Augmentation (currently only spec augmentation is implemented)
        """
        """
        For alignment, the right-aligned data looks like:
        P P P P D D D D
        P P D D D D D D
        P P P P P D D D
        P P P D D D D D
        This can affect positional encoding (whose implementation is not consistent w.r.t padding)
        For models with absolute positional encoding, src and tgt should be aligned left (This is default)
        For models with relative positional encoding, src should be right and tgt should be left
        """
        # pad ids come either from a dill-serialized constants object (used when the
        # dataset is built in a worker process) or from the global onmt constants
        if constants is not None:
            constants = dill.loads(constants)
            self.tgt_pad = constants.TGT_PAD
            self.src_pad = constants.SRC_PAD
        else:
            self.tgt_pad = onmt.constants.TGT_PAD
            self.src_pad = onmt.constants.SRC_PAD
        self.src = src_data
        self.past_src = past_src_data
        self._type = data_type
        self.src_align_right = src_align_right
        if self.src_align_right and verbose:
            print("* Source sentences aligned to the right side.")
        self.tgt_align_right = tgt_align_right
        self.upsampling = kwargs.get('upsampling', False)
        self.max_src_len = kwargs.get('max_src_len', None)
        self.max_tgt_len = kwargs.get('max_tgt_len', 256)
        self.cleaning = int(cleaning)
        self.debug = debug
        self.num_split = num_split
        self.vocab_mask = None
        self.use_past_src = self.past_src is not None
        self.min_tgt_len = kwargs.get('min_tgt_len', 3)
        self.min_src_len = kwargs.get('min_src_len', 2)
        self.batch_size_frames = batch_size_frames
        cut_off_size = kwargs.get('cut_off_size', 200000)
        smallest_batch_size = kwargs.get('smallest_batch_size', 4)

        # modality-dependent default for the maximum source length
        if self.max_src_len is None:
            if self._type == 'text':
                self.max_src_len = 256
            elif self._type == 'wav':
                self.max_src_len = 320000
            else:
                # for audio set this to 2048 frames
                self.max_src_len = 4096 if not self.use_past_src else 8192

        # self.reshape_speech = reshape_speech
        if tgt_data:
            self.tgt = tgt_data
        else:
            self.tgt = None

        # Processing data sizes
        if self.src is not None:
            if src_sizes is not None:
                if verbose:
                    print("Loading source size from binarized data ...")
                src_sizes = np.asarray(src_sizes)
            else:
                if verbose:
                    print("Source size not available. Computing source size from data...")
                src_sizes = np.asarray([data.size(0) for data in self.src])
        else:
            src_sizes = None

        # add the past source size to source size (to balance out the encoder part during allocation)
        if self.use_past_src:
            if past_src_data_sizes is not None:
                src_sizes += np.asarray(past_src_data_sizes)
            else:
                src_sizes += np.asarray([data.size(0) for data in self.past_src])

        if self.tgt is not None:
            if tgt_sizes is not None:
                print("Loading target size from binarized data ...")
                tgt_sizes = np.asarray(tgt_sizes)
            else:
                print("Target size not available. Computing target size from data...")
                tgt_sizes = np.asarray([data.size(0) for data in self.tgt])
        else:
            tgt_sizes = None

        # sort data to have efficient mini-batching during training
        # NOTE(review): with sorting=True and a data type outside
        # {'text', 'audio', 'wav'}, sorted_order stays unbound — confirm unreachable
        if sorting:
            if self._type == 'text':
                sorted_order = np.lexsort((src_sizes, tgt_sizes))
            elif self._type in ['audio', 'wav']:
                sorted_order = np.lexsort((tgt_sizes, src_sizes))
        else:
            sorted_order = np.arange(len(self.src))

        self.order = None

        # store data length in numpy for fast query
        if self.tgt is not None and self.src is not None:
            stacked_sizes = np.stack((src_sizes, tgt_sizes - 1), axis=0)
            data_lengths = np.amax(stacked_sizes, axis=0)
        elif self.src is None:
            data_lengths = tgt_sizes
        else:
            data_lengths = src_sizes

        # Processing language ids
        self.src_langs = src_langs
        self.tgt_langs = tgt_langs
        if self.src_langs is not None and self.tgt_langs is not None:
            assert (len(src_langs) == len(tgt_langs))

        # Processing attributes
        self.src_atbs = src_atbs
        self.tgt_atbs = tgt_atbs

        # In "bilingual" case, the src_langs only contains one single vector
        # Which is broadcasted to batch_size
        if len(src_langs) <= 1:
            self.bilingual = True
            if self.src_atbs is not None:
                assert(len(src_atbs) <= 1), "For a bilingual dataset, expect attributes to be 'singular' too"
        else:
            self.bilingual = False

        self.full_size = len(src_sizes)
        # self.full_size = len(self.src) if self.src is not None else len(self.tgt)
        # maximum number of tokens in a mb
        self.batch_size_words = batch_size_words
        # maximum sequences in a mb
        self.batch_size_sents = batch_size_sents
        # the actual batch size must divide by this multiplier (for fp16 it has to be 4 or 8)
        self.multiplier = multiplier
        # by default: count the amount of padding when we group mini-batches
        self.pad_count = True

        # group samples into mini-batches
        # if verbose:
        #     print("* Allocating mini-batches ...")
        if self._type in ['audio', 'wav']:
            self.batches = allocate_batch_unbalanced(sorted_order, data_lengths,
                                                     src_sizes, tgt_sizes,
                                                     batch_size_frames, batch_size_words,
                                                     batch_size_sents, self.multiplier,
                                                     self.max_src_len, self.max_tgt_len,
                                                     self.min_src_len, self.min_tgt_len, self.cleaning,
                                                     cut_off_size, smallest_batch_size)
        else:
            self.batches = allocate_batch(sorted_order, data_lengths,
                                          src_sizes, tgt_sizes,
                                          batch_size_words, batch_size_sents, self.multiplier,
                                          self.max_src_len, self.max_tgt_len,
                                          self.min_src_len, self.min_tgt_len, self.cleaning)

        # the second to last mini-batch is likely the largest
        # (the last one can be the remnant after grouping samples which has less than max size)
        self.largest_batch_id = len(self.batches) - 3

        self.num_batches = len(self.batches)
        self.batch_sizes = [len(x) for x in self.batches]
        # if self.src_sizes is not None:
        #     self.batch_src_sizes = [max([self.src_sizes[x] for x in b]) for b in self.batches]
        # else:
        #     self.batch_src_sizes = [0 for b in self.batches]
        #
        # if self.tgt_sizes is not None:
        #     self.batch_tgt_sizes = [max([self.tgt_sizes[x] for x in b]) for b in self.batches]
        # else:
        #     self.batch_tgt_sizes = [0 for b in self.batches]
        print("Number of sentences before cleaning and sorting: %d" % len(src_sizes))
        print("Number of sentences after cleaning and sorting: %d" % sum(self.batch_sizes))
        print("Number of batches after cleaning and sorting: %d" % self.num_batches)

        self.cur_index = 0
        self.batchOrder = None
        self.input_size = input_size
        if augment:
            self.augmenter = Augmenter(F=sa_f, T=sa_t, input_size=input_size)
        else:
            self.augmenter = None

    def flush_cache(self):
        # forward to the source container when it supports caching (e.g. scp readers)
        if hasattr(self.src, 'flush_cache'):
            self.src.flush_cache()

    def size(self):
        # number of samples in the dataset (before batching)
        return self.full_size

    def switchout(self, batch):
        # no-op: switchout is applied on the Batch object instead
        pass

    def set_epoch(self, epoch):
        # no-op: kept for interface compatibility with epoch-aware datasets
        pass

    def set_mask(self, vocab_mask):
        # restrict the output vocabulary for subsequent batches
        self.vocab_mask = vocab_mask

    def get_largest_batch(self, bsz=-1, src_size=-1, tgt_size=-1):
        # used e.g. to pre-allocate / warm up memory with the biggest batch
        if bsz == -1 and src_size == -1 and tgt_size == -1:
            return self.get_batch(self.largest_batch_id)
        else:
            raise NotImplementedError
            # batch = None
            # for i in range(self.num_batches):
            #
            #     src_size_ = self.batch_src_sizes[i]
            #     tgt_size_ = self.batch_tgt_sizes[i]
            #     bsz_size_ = self.batch_sizes[i]
            #
            #     get_batch = True
            #     if bsz > 0:
            #         if bsz_size_ != bsz:
            #             get_batch = False
            #
            #     if src_size > 0:
            #         if src_size_ != src_size:
            #             get_batch = False
            #
            #     if tgt_size > 0:
            #         if tgt_size_ != tgt_size:
            #             get_batch = False
            #
            #     if get_batch:
            #         # print("Found batch satisfying the conditions bsz %d src_size %d tgt_size %d" % (bsz, src_size, tgt_size))
            #         return self.get_batch(i)
            # print("Cannot find the batch satisfying those conditions")
        # NOTE(review): unreachable — both branches above return or raise
        return self.get_batch(self.largest_batch_id)

    def __len__(self):
        # number of mini-batches
        return self.num_batches

    def __getitem__(self, index):
        """Return ONE sample (not a batch) as a dict for the collater."""
        src_lang, tgt_lang = None, None
        src_atb, tgt_atb = None, None
        if self.bilingual:
            if self.src_langs is not None:
                src_lang = self.src_langs[0]  # should be a tensor [0]
            if self.tgt_langs is not None:
                tgt_lang = self.tgt_langs[0]  # should be a tensor [1]
            if self.src_atbs is not None:
                src_atb = self.src_atbs[0]
            if self.tgt_atbs is not None:
                tgt_atb = self.tgt_atbs[0]
        else:
            if self.src_langs is not None:
                src_lang = self.src_langs[index]
            if self.tgt_langs is not None:
                tgt_lang = self.tgt_langs[index]
            # if self.src_atbs is not None:
            #     src_atb = self.src_atbs[index]
            # if self.tgt_atbs is not None:
            #     tgt_atb = self.tgt_atbs[index]
            src_atb = None
            tgt_atb = None

        # move augmenter here?
        if self.use_past_src:
            past_src = self.past_src[index]
        else:
            past_src = None

        sample = {
            'src': self.src[index] if self.src is not None else None,
            'tgt': self.tgt[index] if self.tgt is not None else None,
            'src_lang': src_lang,
            'tgt_lang': tgt_lang,
            'src_atb': src_atb,
            'tgt_atb': tgt_atb,
            'past_src': past_src
        }

        return sample

    def get_batch(self, index):
        """
        This function is only used in when we need to access a batch directly from the dataset
        (Without an external loader)
        :param index: the index of the mini-batch in the list
        :return: Batch
        """
        assert index < self.num_batches, "%d > %d" % (index, self.num_batches)
        batch_ids = self.batches[index]
        if self.src:
            src_data = [self.src[i] for i in batch_ids]
        else:
            src_data = None
        if self.tgt:
            tgt_data = [self.tgt[i] for i in batch_ids]
        else:
            tgt_data = None

        src_lang_data = None
        tgt_lang_data = None
        src_atbs_data = None
        tgt_atbs_data = None

        if self.bilingual:
            if self.src_langs is not None:
                src_lang_data = [self.src_langs[0]]  # should be a tensor [0]
            if self.tgt_langs is not None:
                tgt_lang_data = [self.tgt_langs[0]]  # should be a tensor [1]
            if self.src_atbs is not None:
                src_atbs_data = [self.src_atbs[0]]
            if self.tgt_atbs is not None:
                tgt_atbs_data = [self.tgt_atbs[0]]
        else:
            if self.src_langs is not None:
                src_lang_data = [self.src_langs[i] for i in batch_ids]
            if self.tgt_langs is not None:
                tgt_lang_data = [self.tgt_langs[i] for i in batch_ids]
            # if self.src_atbs is not None:
            #     src_atbs_data = [self.src_atbs[i] for i in batch_ids]
            # if self.tgt_atbs is not None:
            #     tgt_atbs_data = [self.tgt_atbs[i] for i in batch_ids]
            src_atbs_data = None
            tgt_atbs_data = None

        if self.use_past_src:
            past_src = [self.past_src[i] for i in batch_ids]
        else:
            past_src = None

        batch = rewrap(collate_fn(src_data, tgt_data=tgt_data,
                                  src_lang_data=src_lang_data, tgt_lang_data=tgt_lang_data,
                                  src_atbs_data=src_atbs_data, tgt_atbs_data=tgt_atbs_data,
                                  src_align_right=self.src_align_right, tgt_align_right=self.tgt_align_right,
                                  src_type=self._type,
                                  augmenter=self.augmenter, upsampling=self.upsampling, vocab_mask=self.vocab_mask,
                                  past_src_data=past_src,
                                  src_pad=self.src_pad,
                                  tgt_pad=self.tgt_pad,
                                  feature_size=self.input_size),
                       )
        return batch

    def collater(self, collected_samples):
        """
        Merge a list of samples into a Batch
        :param collected_samples: list of dicts (the output of the __getitem__)
        :return: batch
        """
        # optionally split one collected batch into num_split smaller sub-batches
        split_size = math.ceil(len(collected_samples) / self.num_split)
        sample_list = [collected_samples[i:i + split_size]
                       for i in range(0, len(collected_samples), split_size)]

        batches = list()
        for samples in sample_list:
            src_data, tgt_data = None, None
            src_lang_data, tgt_lang_data = None, None
            src_atbs_data, tgt_atbs_data = None, None
            past_src_data = None

            if self.src:
                src_data = [sample['src'] for sample in samples]
            if self.tgt:
                tgt_data = [sample['tgt'] for sample in samples]

            if self.bilingual:
                if self.src_langs is not None:
                    src_lang_data = [self.src_langs[0]]  # should be a tensor [0]
                if self.tgt_langs is not None:
                    tgt_lang_data = [self.tgt_langs[0]]  # should be a tensor [1]
                if self.src_atbs is not None:
                    src_atbs_data = [self.src_atbs[0]]
                if self.tgt_atbs is not None:
                    tgt_atbs_data = [self.tgt_atbs[0]]
            else:
                if self.src_langs is not None:
                    src_lang_data = [sample['src_lang'] for sample in samples]  # should be a tensor [0]
                if self.tgt_langs is not None:
                    tgt_lang_data = [sample['tgt_lang'] for sample in samples]  # should be a tensor [1]
                # if self.src_atbs is not None:
                #     src_atbs_data = [self.src_atbs[i] for i in batch_ids]
                # if self.tgt_atbs is not None:
                #     tgt_atbs_data = [self.tgt_atbs[i] for i in batch_ids]
                src_atbs_data = None
                tgt_atbs_data = None

            if self.use_past_src:
                past_src_data = [sample['past_src'] for sample in samples]

            batch = collate_fn(src_data, tgt_data=tgt_data,
                               src_lang_data=src_lang_data, tgt_lang_data=tgt_lang_data,
                               src_atbs_data=src_atbs_data, tgt_atbs_data=tgt_atbs_data,
                               src_align_right=self.src_align_right, tgt_align_right=self.tgt_align_right,
                               src_type=self._type,
                               augmenter=self.augmenter, upsampling=self.upsampling, vocab_mask=self.vocab_mask,
                               past_src_data=past_src_data, src_pad=self.src_pad, tgt_pad=self.tgt_pad,
                               feature_size=self.input_size)
            batches.append(batch)

        return batches

    # NOTE(review): dead code — __init__ assigns the int attribute self.full_size,
    # which shadows this method on instances; inst.full_size() would raise
    # TypeError ('int' object is not callable).
    def full_size(self):
        return self.full_size

    # genereate a new batch - order (static)
    def create_order(self, random=True):
        if random:
            self.batchOrder = torch.randperm(self.num_batches)
        else:
            self.batchOrder = torch.arange(self.num_batches).long()
        self.cur_index = 0
        return self.batchOrder

    # # return the next batch according to the iterator
    # def next(self, curriculum=False, reset=True):
    #
    #     # reset iterator if reach data size limit
    #     if self.cur_index >= self.num_batches:
    #         if reset:
    #             self.cur_index = 0
    #         else:
    #             return None
    #
    #     if curriculum or self.batchOrder is None:
    #         batch_index = self.cur_index
    #     else:
    #         batch_index = self.batchOrder[self.cur_index]
    #
    #     batch = self[batch_index]
    #
    #     # move the iterator one step
    #     self.cur_index += 1
    #
    #     return [batch]
    #
    # def shuffle(self):
    #     data = list(zip(self.src, self.tgt))
    #     self.src, self.tgt = zip(*[data[i] for i in torch.randperm(len(data))])
    #
    # def set_index(self, iteration):
    #
    #     assert (0 <= iteration < self.num_batches)
    #     self.cur_index = iteration
| 32,665 | 38.499395 | 129 | py |
NMTGMinor | NMTGMinor-master/onmt/data/data_iterator.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
import itertools
import logging
import math
import operator
import os
import queue
import time
from threading import Thread
import random
import numpy as np
import torch
from onmt.data.dataset import rewrap
from onmt.data import data_utils
_sentinel = object()
class CountingIterator(object):
    """Iterable wrapper that tracks how many elements have been consumed.

    Args:
        iterable (iterable): the iterable being wrapped
        start (int): initial consumption count (defaults to ``iterable.n``
            or 0); note this does not advance the underlying iterable
        total (int): cap on the reported length / number of yielded elements;
            can be used to truncate the iterator
        empty (bool): when True the iterator yields nothing and ``__next__``
            returns None

    Attributes:
        n (int): number of elements consumed from this iterator
    """

    def __init__(self, iterable, start=None, total=None, empty=False):
        self.iterable = iterable
        self.empty = empty
        self.n = getattr(iterable, 'n', 0) if start is None else start
        self.total = (self.n + len(iterable)) if total is None else total
        self.itr = iter(self)

    def __len__(self):
        return self.total

    def __iter__(self):
        if self.empty:
            return
        for item in self.iterable:
            # stop as soon as the (possibly truncated) total is reached
            if self.n >= self.total:
                return
            self.n += 1
            yield item

    def __next__(self):
        return None if self.empty else next(self.itr)

    def has_next(self):
        """Return True while more elements remain to be consumed."""
        return self.n < len(self)

    def skip(self, num_to_skip):
        """Fast-forward the iterator past *num_to_skip* elements; returns self."""
        next(itertools.islice(self.itr, num_to_skip, num_to_skip), None)
        return self

    def take(self, n):
        """Truncate the iterator to at most *n* elements."""
        self.total = min(self.total, n)
        # Propagate this change to the underlying iterator when supported
        if hasattr(self.iterable, "take"):
            self.iterable.take(n)
class EpochBatchIterating(object):
    """Abstract contract for iterators that hand out one batch iterator per
    training epoch and can checkpoint/restore their position.

    Concrete subclasses (e.g. ``DataIterator``) must implement every member
    below; invoking any of them on this base class raises
    ``NotImplementedError``.
    """

    def __len__(self) -> int:
        # total number of mini-batches in one epoch
        raise NotImplementedError

    @property
    def next_epoch_idx(self):
        # index of the epoch that the next call to next_epoch_itr() will serve
        raise NotImplementedError

    def next_epoch_itr(self, shuffle=True, pin_memory=False):
        """Produce a fresh iterator over the dataset for the upcoming epoch.

        :param shuffle: shuffle batches before returning the iterator
            (default: True)
        :param pin_memory: pin host memory for faster device transfer
        """
        raise NotImplementedError

    def end_of_epoch(self) -> bool:
        """True once the most recently issued epoch iterator is exhausted."""
        raise NotImplementedError

    @property
    def iterations_in_epoch(self) -> int:
        """Number of batches already consumed in the running epoch."""
        raise NotImplementedError

    def state_dict(self):
        """Snapshot the iterator state for checkpointing."""
        raise NotImplementedError

    def load_state_dict(self, state_dict):
        """Restore the iterator state from *state_dict*."""
        raise NotImplementedError
"""A multi-epoch iterator over a :class:`torch.utils.data.Dataset`.
Compared to :class:`torch.utils.data.DataLoader`, this iterator:
dataset (~torch.utils.data.Dataset)
"""
class DataIterator(EpochBatchIterating):
    """A multi-epoch, shard-aware iterator over a dataset.

    Wraps a ``torch.utils.data.DataLoader`` built from a frozen batch sampler,
    adds shuffling per epoch, sharding across distributed workers, optional
    background prefetching, and checkpoint save/restore of the position
    within an epoch.
    """

    def __init__(self, dataset, collate_fn, batch_sampler, seed=1, num_workers=0,
                 epoch=1, buffer_size=0, timeout=0, num_shards=1, shard_id=0, fill_value=None, split_even=True):
        """
        :param dataset:
        :param collate_fn:
        :param batch_sampler:
        :param seed:
        :param num_workers:
        :param epoch:
        :param buffer_size:
        :param timeout:
        :param shard_id: equivalent with rank
        :param num_shards: equivalent with world size
        """
        # it can be torch.utils.data.Dataset or a proxy class used to share between the processes in the node
        # assert isinstance(dataset, torch.utils.data.Dataset)
        self.dataset = dataset
        self.collate_fn = collate_fn
        self.frozen_batches = tuple(batch_sampler)  # immutable snapshot of the batch index lists
        self.seed = seed
        self.num_workers = num_workers
        self.epoch = max(epoch, 1)
        self.buffer_size = buffer_size
        self.timeout = timeout
        self.shard_id = shard_id
        self.num_shards = num_shards
        self.shuffle = True
        self._cur_epoch_itr = None
        self._next_epoch_itr = None
        self._support_prefetch = False
        self.fill_value = fill_value
        self.split_even = split_even

    def __len__(self):
        # Number of minibatches per epoch. With split_even the batch list is
        # padded up to a multiple of num_shards before sharding, hence ceil().
        if self.split_even:
            return math.ceil(len(self.frozen_batches) / self.num_shards) * self.num_shards
        else:
            return len(self.frozen_batches)

    @property
    def next_epoch_idx(self):
        """Return the epoch index after *next_epoch_itr* is called"""
        if self._next_epoch_itr is not None:
            return self.epoch
        elif self._cur_epoch_itr is not None and self.end_of_epoch():
            return self.epoch + 1
        else:
            return self.epoch

    def next_epoch_itr(self, shuffle=True, pin_memory=False, split_even=False):
        """
        Return a new iterator over the dataset
        :param split_even: NOTE(review): currently unused here; the instance
            attribute set in __init__ is what controls sharding.
        :param pin_memory:
        :param shuffle:
        :return:
        """
        self.epoch = self.next_epoch_idx
        # Prefer an iterator pre-built by load_state_dict (resume case).
        if self._next_epoch_itr is not None:
            self._cur_epoch_itr = self._next_epoch_itr
            self._next_epoch_itr = None
        else:
            self._cur_epoch_itr = self._get_iterator_for_epoch(
                self.epoch, shuffle, pin_memory=pin_memory)
        self.dataset.set_epoch(self.epoch)
        self.shuffle = shuffle
        return self._cur_epoch_itr

    def end_of_epoch(self) -> bool:
        return not self._cur_epoch_itr.has_next()

    @property
    def iterations_in_epoch(self):
        """ The number of consumed batches in the current epoch"""
        if self._cur_epoch_itr is not None:
            return self._cur_epoch_itr.n
        elif self._next_epoch_itr is not None:
            return self._next_epoch_itr.n
        return 0

    def state_dict(self):
        """Returns a dictionary containing a whole state of the iterator."""
        return {
            'epoch': self.epoch,
            'iterations_in_epoch': self.iterations_in_epoch,
            'shuffle': self.shuffle,
        }

    def load_state_dict(self, state_dict):
        """Copies the state of the iterator from the given *state_dict*."""
        if state_dict is not None:
            self.epoch = state_dict['epoch']
            itr_pos = state_dict.get('iterations_in_epoch', 0)
            if itr_pos > 0:
                # fast-forward epoch iterator
                self._next_epoch_itr = self._get_iterator_for_epoch(
                    self.epoch,
                    shuffle=state_dict.get('shuffle', True),
                    offset=itr_pos,
                )
                if self._next_epoch_itr is None:
                    # we finished the epoch, increment epoch counter
                    self.epoch += 1
            else:
                self._next_epoch_itr = None
        else:
            # nothing to restore: start over from epoch 1
            self.epoch = 1
            itr_pos = 0
            self._next_epoch_itr = None

    def _get_iterator_for_epoch(self, epoch, shuffle, offset=0, pin_memory=False):
        # Build the actual DataLoader-backed iterator for one epoch,
        # optionally skipping the first *offset* batches (resume case).
        def shuffle_batches(batches_, seed):
            # deterministic shuffle: seed depends on the epoch so every epoch
            # gets a different but reproducible order
            with data_utils.numpy_seed(seed):
                np.random.shuffle(batches_)
            return batches_
        if self._support_prefetch:
            raise NotImplementedError
        if shuffle:
            batches = shuffle_batches(list(self.frozen_batches), self.seed + epoch)
        else:
            batches = list(self.frozen_batches)
        num_shards = self.num_shards
        # if split even then fill the batch with random batches
        if self.split_even:
            if len(batches) % self.num_shards != 0:
                for _ in range(num_shards - (len(batches) % num_shards)):
                    rand_id = random.randint(0, len(batches) - 1)
                    batches.append(batches[rand_id])
            # NOTE(review): if batches is empty this indexes batches[0] and
            # raises IndexError — presumably datasets are never empty; confirm.
            batches = list(ShardedIterator(batches, num_shards, self.shard_id, fill_value=batches[0]))
        # catch the exception when the data is so small that one iterator is completely empty
        if len(batches) == 0:
            empty = True
        else:
            empty = False
        #
        # if offset > 0 and offset >= len(batches):
        #     return None
        if self.num_workers > 0:
            # silence the multiprocessing semaphore tracker warning in workers
            os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
        # Create data loader
        itr = torch.utils.data.DataLoader(
            self.dataset,
            collate_fn=self.collate_fn,
            batch_sampler=batches[offset:],
            num_workers=self.num_workers,
            pin_memory=pin_memory,
            timeout=self.timeout,
        )
        # Wrap with a BufferedIterator if needed
        if self.buffer_size > 0:
            itr = BufferedIterator(self.buffer_size, itr)
        # Wrap with CountingIterator so the consumed count starts at *offset*
        itr = CountingIterator(itr, start=offset, empty=empty)
        return itr
class ShardedIterator(CountingIterator):
    """A sharded wrapper around an iterable, padded to length.
    Args:
        iterable (iterable): iterable to wrap
        num_shards (int): number of shards to split the iterable into
        shard_id (int): which shard to iterator over
        fill_value (Any, optional): padding value when the iterable doesn't
            evenly divide *num_shards* (default: None).
    Attributes:
        n (int): number of elements consumed from this iterator
    """
    def __init__(self, iterable, num_shards, shard_id, fill_value=None):
        if shard_id < 0 or shard_id >= num_shards:
            raise ValueError('shard_id must be between 0 and num_shards')
        # NOTE(review): len(iterable) == 0 makes sharded_len == 0 and the
        # division below raise ZeroDivisionError — callers (DataIterator with
        # split_even) appear to pad the batch list first; confirm.
        sharded_len = int(math.ceil(len(iterable) / float(num_shards)))
        # 4 shard for 6 gpu:
        # shard_len = 1
        # 5 gpus get 0 zeros
        n_full_gpus = math.floor(len(iterable) / float(sharded_len))
        #
        # if shard_id == (num_shards - 1): # last shard takes the remaining
        #     sharded_len = len(iterable) - sharded_len * (num_shards - 1)
        # Uneven case: the first n_full_gpus shards each get sharded_len
        # batches, the next shard takes the remainder, and any shard after
        # that gets nothing at all.
        if shard_id < n_full_gpus:
            sharded_len = sharded_len
        elif shard_id == n_full_gpus:  # the very next one after full
            sharded_len = len(iterable) - sharded_len * n_full_gpus
        else:
            sharded_len = 0
        # # first islice takes a list of minibatch-ids from shard_id to max, every "num_shards"
        # # next, zip_longest takes the zip between (0, 1, ... n) and
        # # the minibatches (longest, fill the latter with [])
        # # next, map will apply the function taking the minibatches to return the iterator
        itr = map(
            operator.itemgetter(1),
            itertools.zip_longest(
                range(sharded_len),
                itertools.islice(iterable, shard_id, len(iterable), num_shards),
                fillvalue=fill_value,
            ),
        )
        # start resumes a consumed count proportional to the shard size
        super().__init__(
            itr,
            start=int(math.ceil(getattr(iterable, 'n', 0) / float(num_shards))),
            total=sharded_len,
        )
class BackgroundConsumer(Thread):
    """Producer thread that drains *source* into *queue*.

    When the source is exhausted (or *max_len* items were produced) the
    module-level ``_sentinel`` object is enqueued; on failure the exception
    object itself is enqueued so the consumer can re-raise it.
    """

    def __init__(self, queue, source, max_len):
        """
        :param queue: queue.Queue the items are pushed into
        :param source: a sized iterable to drain
        :param max_len: stop after this many items (None for no limit)
        """
        Thread.__init__(self)
        self._queue = queue
        self._source = source
        self._max_len = max_len
        self.count = 0

    def run(self):
        # BUG FIX: the original `del self._source_iter` sat *after* the
        # try/except; if iter(self._source) itself raised, the attribute was
        # never created and run() died with a secondary AttributeError.
        self._source_iter = None
        try:
            self._source_iter = iter(self._source)
            for _ in range(len(self._source)):
                item = next(self._source_iter)
                self._queue.put(item)
                # Stop if we reached the maximum length
                self.count += 1
                if self._max_len is not None and self.count >= self._max_len:
                    break
            # Signal the consumer we are done.
            self._queue.put(_sentinel)
        except Exception as e:
            # Forward the exception through the queue for the consumer thread.
            self._queue.put(e)
        finally:
            # Always release the iterator reference, even on failure.
            self._source_iter = None
class BufferedIterator(object):
    """Iterator wrapper that prefetches from *iterable* on a background
    thread into a bounded queue, overlapping data loading with consumption.

    Exceptions raised by the producer are forwarded through the queue and
    re-raised in the consumer; the module-level ``_sentinel`` marks exhaustion.
    (This block also strips dataset-dump residue that corrupted the final
    ``return item`` line.)
    """

    def __init__(self, size, iterable):
        """
        :param size: maximum number of prefetched items held in the queue
        :param iterable: underlying sized iterable to drain
        """
        self._queue = queue.Queue(size)
        self._iterable = iterable
        self.max_len = None
        self._consumer = None
        self.start_time = time.time()
        self.warning_time = None

    def _create_consumer(self):
        # Lazily start the producer thread on the first __next__ call, so
        # that take() can still adjust max_len beforehand.
        self._consumer = BackgroundConsumer(
            self._queue,
            self._iterable,
            self.max_len
        )
        self._consumer.daemon = True
        self._consumer.start()

    def __iter__(self):
        return self

    def __len__(self):
        return len(self._iterable)

    def take(self, n):
        """Truncate the iterator to at most *n* elements (only effective if
        called before the producer thread has been started)."""
        self.max_len = n

    def __next__(self):
        # Create consumer if not created yet
        if self._consumer is None:
            self._create_consumer()
        # Notify the user if there is a data loading bottleneck
        if self._queue.qsize() < max(1, self._queue.maxsize // 2):
            if time.time() - self.start_time > 5 * 60:
                if self.warning_time is None or time.time() - self.warning_time > 15 * 60:
                    # The warning print is intentionally muted; the timestamp
                    # is still recorded so any future message stays rate-limited.
                    self.warning_time = time.time()
        # Get next example (blocking); re-raise producer-side failures here.
        item = self._queue.get(True)
        if isinstance(item, Exception):
            raise item
        if item is _sentinel:
            raise StopIteration()
        return item
NMTGMinor | NMTGMinor-master/onmt/data/binarizer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import Counter
import os
from onmt.utils import safe_readline, safe_readaudio
# from multiprocessing import Pool
import torch.multiprocessing as mp
import torch
import onmt
import numpy as np
from .audio_utils import ArkLoader
class SpeechBinarizer:
    """Reads speech feature files (Kaldi scp/ark or raw wav lists) and turns
    them into lists of tensors / paths plus their lengths, optionally using
    multiple worker processes."""

    def __init__(self):
        pass

    @staticmethod
    def binarize_h5_file(filename, output_format='raw',
                         prev_context=0, concat=4, stride=1, fp16=False):
        """HDF5 input is not supported.

        The previous body referenced names that were never defined (``h5``,
        ``input_format``, ``index``, ``src_file``) and therefore could only
        crash with a NameError; fail fast with a clear error instead.
        """
        raise NotImplementedError("Binarizing HDF5 feature files is not supported.")

    @staticmethod
    def binarize_file_single_thread(filename, ark_loader, offset=0, end=-1, worker_id=0,
                                    input_format='scp', output_format='raw',
                                    prev_context=0, concat=4, stride=1, fp16=False, sample_rate=16000, verbose=False):
        """Binarize the byte range [offset, end) of *filename* in one worker.

        :param filename: scp/wav list file; one utterance per line
        :param ark_loader: loader object exposing load_mat/load_wav
        :param offset: byte offset to start reading from
        :param end: byte offset to stop at (-1 for end of file)
        :param worker_id: id used for logging and result merging
        :param input_format: 'scp'/'kaldi' for ark matrices, 'wav' for audio
        :param output_format: 'scp'/'scpmem' keeps paths only, otherwise arrays
        :returns: dict with 'data', 'sizes', 'id' and 'total'
        """
        # if output_format is scp, we only read the length for sorting
        if output_format == 'scp':
            assert input_format in ['kaldi', 'scp']
        if output_format == 'wav':
            input_format = 'wav'

        result = dict()
        data = list()
        lengths = list()
        index = 0

        with open(filename, 'r', encoding='utf-8') as f:
            f.seek(offset)
            line = safe_readline(f)
            while line:
                if 0 < end < f.tell():
                    break
                parts = line.split()
                key = parts[0]
                # this special case is for the "preceeding" (empty) utterance
                if key == 'NULL':
                    feature_vector = torch.zeros(0, 0)
                    lengths.append(feature_vector.size(0))
                    line = f.readline()
                    continue
                if input_format in ['scp', 'kaldi']:
                    # an scp file has the format: uttid path:mem
                    path = parts[1]
                    # read numpy array from the ark here
                    feature_vector = ark_loader.load_mat(path)
                    if stride == 1:
                        feature_vector = torch.from_numpy(feature_vector)
                    else:
                        # temporal subsampling: keep every stride-th frame
                        feature_vector = torch.from_numpy(feature_vector[0::stride])
                    if concat > 1:
                        # zero-pad so the frame count divides evenly, then
                        # stack *concat* consecutive frames into one vector
                        add = (concat - feature_vector.size()[0] % concat) % concat
                        z = torch.FloatTensor(add, feature_vector.size()[1]).zero_()
                        feature_vector = torch.cat((feature_vector, z), 0)
                        feature_vector = feature_vector.reshape((int(feature_vector.size()[0] / concat),
                                                                 feature_vector.size()[1] * concat))
                    if prev_context > 0:
                        print("Multiple ASR context isn't supported at the moment ")
                        raise NotImplementedError
                    if fp16 and output_format not in ['scp', 'scpmem']:
                        feature_vector = feature_vector.half()
                    if output_format not in ['scp', 'scpmem']:
                        data.append(feature_vector.numpy())  # convert to numpy for serialization
                    else:
                        data.append(path)
                elif input_format == 'wav':
                    # a wav input file should have format: uttid wav_file start end
                    # in which the start and end (by second) can be 0 0
                    if len(parts) >= 4:
                        wavpath, start_time, end_time = parts[1], float(parts[2]), float(parts[3])
                    else:
                        wavpath = parts[1]
                        start_time = 0
                        end_time = -1
                    if verbose:
                        print("processing wav file ...", wavpath, start_time, end_time)
                    feature_vector = ark_loader.load_wav(wavpath, start_time, end_time, sample_rate=sample_rate)
                    # store a tuple of data and information to load the wav again during training
                    data.append((wavpath, start_time, end_time, sample_rate))
                length = feature_vector.size(0)
                lengths.append(length)
                line = f.readline()
                if (index + 1) % 100000 == 0:
                    print("[INFO] Thread %d Processed %d audio utterances." % (worker_id, index + 1))
                index = index + 1

        result['data'] = data
        result['sizes'] = lengths
        result['id'] = worker_id
        result['total'] = len(lengths)
        return result

    @staticmethod
    def binarize_file(filename, input_format='scp', output_format='raw',
                      prev_context=0, concat=4, stride=1, fp16=False, num_workers=1, verbose=False):
        """Binarize *filename*, optionally splitting the work over
        *num_workers* processes, and merge the per-worker results in order.

        :returns: dict with 'data' (tensors or paths) and 'sizes' (lengths)
        """
        result = dict()
        for i in range(num_workers):
            result[i] = dict()
        final_result = dict()

        def merge_result(bin_result):
            # keyed by worker id so the original line order can be restored
            result[bin_result['id']]['data'] = bin_result['data']
            result[bin_result['id']]['sizes'] = bin_result['sizes']

        offsets = Binarizer.find_offsets(filename, num_workers)
        ark_loaders = dict()
        for i in range(num_workers):
            if input_format in ['scp', 'kaldi']:
                ark_loaders[i] = ArkLoader()
            elif input_format in ['wav']:
                from .audio_utils import WavLoader
                ark_loaders[i] = WavLoader()
            else:
                ark_loaders[i] = None

        if num_workers > 1:
            pool = mp.Pool(processes=num_workers)
            mp_results = []
            for worker_id in range(num_workers):
                mp_results.append(pool.apply_async(
                    SpeechBinarizer.binarize_file_single_thread,
                    args=(filename, ark_loaders[worker_id], offsets[worker_id], offsets[worker_id + 1], worker_id,
                          input_format, output_format, prev_context, concat, stride, fp16, 16000, verbose),
                ))
            pool.close()
            pool.join()
            for r in mp_results:
                merge_result(r.get())
        else:
            # BUG FIX: this path previously hard-coded input_format='scp',
            # silently ignoring the caller's format (e.g. 'wav').
            sp_result = SpeechBinarizer.binarize_file_single_thread(filename, ark_loaders[0], offsets[0], offsets[1], 0,
                                                                    input_format=input_format,
                                                                    output_format=output_format,
                                                                    prev_context=prev_context, concat=concat,
                                                                    stride=stride, fp16=fp16, verbose=verbose)
            merge_result(sp_result)

        final_result['data'] = list()
        final_result['sizes'] = list()
        # put the data into the list according the worker indices
        for idx in range(num_workers):
            for j in range(len(result[idx]['data'])):
                x = result[idx]['data'][j]
                # if we store the numpy array, then convert to torch
                # otherwise, x is the scp path to the matrix
                if isinstance(x, np.ndarray):
                    x = torch.from_numpy(x)
                final_result['data'].append(x)
            final_result['sizes'] += result[idx]['sizes']

        # remember to close the workers when its done
        for i in range(num_workers):
            if ark_loaders[i] is not None:
                ark_loaders[i].close()
        return final_result
class Binarizer:
    """Converts text files into lists of index tensors, using either a local
    vocabulary + tokenizer or an external HuggingFace tokenizer, optionally
    with multiple worker processes."""

    def __init__(self):
        pass

    @staticmethod
    def find_offsets(filename, num_chunks):
        """
        :param filename: string
        :param num_chunks: int
        :return: a list of offsets (positions to start and stop reading)
        """
        with open(filename, 'r', encoding='utf-8') as f:
            size = os.fstat(f.fileno()).st_size
            chunk_size = size // num_chunks
            offsets = [0 for _ in range(num_chunks + 1)]
            for i in range(1, num_chunks):
                f.seek(chunk_size * i)
                # advance to the next full line so chunks align on line breaks
                safe_readline(f)
                offsets[i] = f.tell()
            return offsets

    @staticmethod
    def binarize_file_single_thread(filename, tokenizer, vocab, worker_id=0, bos_word=None, eos_word=None,
                                    offset=0, end=-1, data_type='int64', verbose=False,
                                    external_tokenizer=(None, None), lang=None, target=False):
        """Read lines in [offset, end) of *filename* and convert each sentence
        into an index tensor.

        :param external_tokenizer: pair (tokenizer object or None, tokenizer
            name); a tuple default avoids the mutable-default-argument trap.
        :param target: True when binarizing the target side (affects the
            language-token position for some tokenizers)
        :returns: dict with 'data', 'sizes', 'id' and 'total'
        """
        result = dict()
        unk_word = onmt.constants.UNK_WORD
        data = list()
        sizes = list()
        count = 0
        ext_tokenizer, external_tokenizer_name = external_tokenizer

        with open(filename, 'r', encoding='utf-8') as f:
            f.seek(offset)
            # next(f) breaks f.tell(), hence readline() must be used
            line = safe_readline(f)
            n_bad_sentences = 0
            while line:
                if 0 < end < f.tell():
                    break
                if ext_tokenizer is None:
                    tokenized_sent = tokenizer.tokenize(line)
                    binarized_line = vocab.convertToIdx(tokenized_sent, unk_word,
                                                        bos_word=bos_word, eos_word=eos_word, type=data_type)
                    # move to shared_memory to transfer between threads
                    # conversion to numpy is necessary because torch.Tensor is not serializable by the mprocess
                    data += [binarized_line.numpy()]
                    sizes += [len(tokenized_sent)]
                else:
                    tensor = ext_tokenizer(line.strip())['input_ids']
                    # assert that the external tokenizer uses the correct language ID
                    if "mbart-large-50" in external_tokenizer_name.lower():
                        assert tensor[0] == vocab.convertToIdx([lang], None)[0], "The first token must be language ID"
                        pad_id = vocab.convertToIdx(["<pad>"], None)[0]
                        assert pad_id not in tensor, "Pad is not supposed to appear in the tensors."
                    elif "m2m" in external_tokenizer_name.lower():
                        lang_token = "__" + lang + "__"
                        assert tensor[0] == vocab.convertToIdx([lang_token], None)[0], \
                            "The first token must be language ID"
                        pad_id = vocab.convertToIdx(["<pad>"], None)[0]
                        assert pad_id not in tensor, "Pad is not supposed to appear in the tensors."
                    elif "deltalm" in external_tokenizer_name.lower():
                        if len(tensor) > 2:
                            if tensor[0] not in [0, 1, 2, 3]:
                                assert tensor[0] == vocab.convertToIdx([lang], None)[0], "The first token must be language ID"
                                pad_id = vocab.convertToIdx(["<pad>"], None)[0]
                                assert pad_id not in tensor, "Pad is not supposed to appear in the tensors."
                        if target and tensor[0] != tensor[-1]:
                            # for the target side and in the multilingual case it is <eos> <langid> X <eos>
                            tensor = [tensor[-1]] + tensor
                    elif "mbart50eu" in external_tokenizer_name.lower():
                        if len(tensor) > 2:
                            if tensor[0] not in [0, 1, 2, 3]:
                                # BUG FIX: the original read `_lang = _lang ...`,
                                # a guaranteed NameError. Map "eu" onto "en_XX",
                                # mirroring the (commented-out) mapping in
                                # binarize_file. TODO(review): confirm the
                                # intended mapping for Basque input.
                                _lang = lang if lang != "eu" else "en_XX"
                                assert tensor[0] == vocab.convertToIdx([_lang], None)[0], \
                                    "The first token must be language ID, expecting %d get %d. Current language: %s" \
                                    % (vocab.convertToIdx([_lang], None)[0], tensor[0], ext_tokenizer.src_lang)
                    if len(tensor) <= 2:
                        n_bad_sentences += 1
                    sizes += [len(tensor)]
                    _dtype = np.int32
                    if data_type == "int64":
                        _dtype = np.int64
                    elif data_type == "int16":
                        _dtype = np.int16
                    data += [np.asarray(tensor, dtype=_dtype)]

                line = f.readline()
                count += 1
                if count % 100000 == 0:
                    if verbose:
                        print("[INFO] Thread %d processed %d lines." % (worker_id, count))

        if verbose:
            if n_bad_sentences > 0:
                print("[Warning] %d empty sentence including <bos> <eos>" % n_bad_sentences)
            print("[INFO] Thread %d Done." % worker_id)
        result['data'] = data
        result['sizes'] = sizes
        result['id'] = worker_id
        result['total'] = len(sizes)
        return result

    @staticmethod
    def binarize_file(filename, vocab, tokenizer, bos_word=None, eos_word=None,
                      data_type='int64', num_workers=1, verbose=False, external_tokenizer="",
                      lang=None, lang_list=None, target=False):
        """Binarize *filename*, resolving *external_tokenizer* by name and
        merging per-worker results in order.

        :param lang_list: list of language codes (DeltaLM only); None means
            empty (avoids a mutable default argument).
        :returns: dict with 'data' and 'sizes'
        """
        if lang_list is None:
            lang_list = []

        if "mbart-large-50" in external_tokenizer.lower():
            print("[INFO] Using the external %s tokenizer..." % external_tokenizer)
            from transformers import MBart50TokenizerFast
            try:  # check if this tokenizer is saved locally or not
                print("Looking for pre-downloaded tokenizer ...")
                ext_tokenizer = torch.load("mbart-large-50.tokenizer.pt")
                ext_tokenizer.src_lang = lang
                if ext_tokenizer.src_lang != lang:
                    raise RuntimeError("The language %s does not exist in mBART50." % lang)
            except FileNotFoundError as e:
                print("Expected error: ", e, "Downloading tokenizer ...")
                ext_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50")
                ext_tokenizer.src_lang = lang
                if ext_tokenizer.src_lang != lang:
                    raise RuntimeError("The language %s does not exist in mBART50." % lang)
                torch.save(ext_tokenizer, "mbart-large-50.tokenizer.pt")
        elif "m2m100" in external_tokenizer.lower():
            print("[INFO] Using the external %s tokenizer..." % external_tokenizer)
            from transformers import M2M100Tokenizer
            ext_tokenizer = M2M100Tokenizer.from_pretrained(external_tokenizer, src_lang=lang)
            ext_tokenizer.src_lang = lang
            if ext_tokenizer.src_lang != lang:
                raise RuntimeError("The language %s does not exist in M2M100." % lang)
        elif "mbart50eu" in external_tokenizer.lower():
            print("[INFO] Using the MBART50EU tokenizer...")
            from transformers import MBart50TokenizerFast
            src_lang = "<s>"
            ext_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50")
            ext_tokenizer.src_lang = src_lang
        elif "bart" in external_tokenizer.lower():
            print("[INFO] Using the external BART tokenizer...")
            from transformers import BartTokenizer
            ext_tokenizer = BartTokenizer.from_pretrained(external_tokenizer)
        elif "deltalm" in external_tokenizer.lower():
            print("[INFO] Using the DeltaLM tokenizer...")
            from pretrain_module.tokenization_deltalm import MultilingualDeltaLMTokenizer
            ext_tokenizer = MultilingualDeltaLMTokenizer.from_pretrained("facebook/mbart-large-50", lang_list=lang_list,
                                                                         src_lang=lang)
        elif "nllb" in external_tokenizer.lower():
            from transformers import NllbTokenizer
            from pretrain_module.tokenization_deltalm import DeltaLMTokenizer
            try:  # check if this tokenizer is saved locally or not
                ext_tokenizer = torch.load("nllb.tokenizer.pt")
                ext_tokenizer.src_lang = lang
            except FileNotFoundError:
                ext_tokenizer = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang=lang)
                torch.save(ext_tokenizer, "nllb.tokenizer.pt")
        elif external_tokenizer is None or len(external_tokenizer) == 0:
            ext_tokenizer = None
        else:
            raise NotImplementedError

        ext_tokenizer = [ext_tokenizer, external_tokenizer]

        result = dict()
        for i in range(num_workers):
            result[i] = dict()
        final_result = dict()

        def merge_result(bin_result):
            # keyed by worker id so the original line order can be restored
            result[bin_result['id']]['data'] = bin_result['data']
            result[bin_result['id']]['sizes'] = bin_result['sizes']

        offsets = Binarizer.find_offsets(filename, num_workers)
        if num_workers > 1:
            pool = mp.Pool(processes=num_workers)
            mp_results = []
            for worker_id in range(num_workers):
                mp_results.append(pool.apply_async(
                    Binarizer.binarize_file_single_thread,
                    args=(filename, tokenizer, vocab, worker_id, bos_word, eos_word,
                          offsets[worker_id], offsets[worker_id + 1], data_type, verbose, ext_tokenizer, lang, target),
                ))
            pool.close()
            pool.join()
            for r in mp_results:
                merge_result(r.get())
        else:
            # pass verbose through for consistency with the multi-worker path
            sp_result = Binarizer.binarize_file_single_thread(filename, tokenizer, vocab, 0, bos_word, eos_word,
                                                              offsets[0], offsets[1], data_type,
                                                              verbose=verbose,
                                                              external_tokenizer=ext_tokenizer,
                                                              lang=lang, target=target)
            merge_result(sp_result)

        final_result['data'] = list()
        final_result['sizes'] = list()
        # put the data into the list according the worker indices
        for idx in range(num_workers):
            final_result['data'] += result[idx]['data']
            final_result['sizes'] += result[idx]['sizes']
        return final_result
NMTGMinor | NMTGMinor-master/onmt/data/multi_dataset.py | from __future__ import division
import math
import torch
import torch.utils.data
from collections import defaultdict
from .dataset import Dataset
from .mmap_indexed_dataset import MMapIndexedDataset
from .scp_dataset import SCPIndexDataset
| 242 | 21.090909 | 52 | py |
NMTGMinor | NMTGMinor-master/onmt/data/multidata_iterator.py | import itertools
import logging
import math
import operator
import os
import queue
import time
from threading import Thread
from .data_iterator import EpochBatchIterating, DataIterator
import numpy as np
import torch
class MultiEpochIterator(object):
    """Merges N per-dataset epoch iterators into a single epoch iterator,
    either in round-robin order or by sampling iterators with probability
    proportional to their size."""
    # this class stores N epoch iterators for N datasets
    # init is called at the beginning of the epoch
    def __init__(self, iterators, round_robin=False):
        """
        :param iterators: a list of CountingIterators
        :param round_robin: if the data is sampled iteratively 1 to N or randomly
        """
        self.iterators = iterators
        self.round_robin = round_robin
        self.n_iterators = len(iterators)
        # self.total = sum([len(iterator) for iterator in self.iterators])
        self.sizes = [len(iterator) for iterator in self.iterators]
        self.total = sum(self.sizes)
        self.itr = iter(self)
        if self.round_robin:
            # fixed visiting order 0..N-1
            self.itr_indices = torch.arange(self.n_iterators)
        else:
            # self.itr_indices = torch.randperm(self.n_iterators)
            # sampling weights proportional to each iterator's size
            with torch.no_grad():
                self.itr_indices = torch.Tensor(self.sizes).div(self.total)
        self.idx = -1
        self.n_yielded = 0

    def iterations_in_epoch(self):
        """
        :return: a list of iterations in epoch for each iterator
        """
        return [iterator.n for iterator in self.iterators]

    def load_iterations(self, iteration_in_epochs):
        # restore the per-iterator consumed counts (checkpoint resume)
        for iterator, iter_in_epoch in zip(self.iterators, iteration_in_epochs):
            iterator.n = iter_in_epoch

    def __len__(self):
        return sum([len(iterator) for iterator in self.iterators])

    def __iter__(self):
        # Generator merging all sub-iterators until self.total items were
        # yielded in total across them.
        while True:
            if self.n_yielded >= self.total:
                return
            if self.round_robin:
                self.idx = self.idx + 1
                if self.idx >= self.n_iterators:
                    self.idx = 0
                cur_iterator = self.iterators[self.itr_indices[self.idx]]
                # if the current iterator is not exhausted, then yield
                # otherwise go to the next one
                if cur_iterator.has_next():
                    self.n_yielded += 1
                    yield next(cur_iterator)
                else:
                    continue
            else:
                # sample randomly from the iterators
                # large datasets will be likely to generate more samples
                # smaller datasets will be less likely
                # but averaging-out, the model is more balanced than round-robin
                sampled_itr = torch.multinomial(self.itr_indices, 1).unsqueeze(-1).item()
                # if the current iterator is not exhausted, then yield
                # otherwise resample
                cur_iterator = self.iterators[sampled_itr]
                if cur_iterator.has_next():
                    self.n_yielded += 1
                    yield next(cur_iterator)
                else:
                    # zero-out that index to avoid sampling into the same empty iterator
                    with torch.no_grad():
                        self.itr_indices[sampled_itr].zero_()
                    continue

    def __next__(self):
        return next(self.itr)

    def has_next(self):
        return self.n_yielded < self.total

    def skip(self, num_to_skip):
        # NOTE(review): this skips num_to_skip on EVERY sub-iterator, not
        # num_to_skip in total — confirm that is what callers expect.
        for iterator in self.iterators:
            iterator.skip(num_to_skip)

    def take(self, n):
        """
        Truncates the iterator to n elements at most.
        """
        # NOTE(review): self.total is not updated here, only the
        # sub-iterators' totals — has_next() keeps using the original total.
        for iterator in self.iterators:
            iterator.take(n)
class MultiDataIterator(EpochBatchIterating):
    """Epoch-batch iterator over several datasets at once.

    Wraps one :class:`DataIterator` per dataset and merges their per-epoch
    iterators through :class:`MultiEpochIterator`, either round-robin or
    sampled proportionally to dataset size.
    (Fix: the original class defined ``state_dict`` twice with identical
    bodies; the duplicate has been removed.)
    """

    def __init__(self, datasets, seed=1., num_workers=0, epoch=1, buffer_size=0,
                 timeout=0, round_robin=False, num_shards=1, shard_id=0, split_even=True, dataset_ids=None):
        """
        :param datasets: list of Datasets
        :param seed: randomizing seed
        :param num_workers: DataLoader workers per dataset
        :param epoch: starting epoch index (1-based)
        :param buffer_size: prefetch buffer size per dataset
        :param timeout: DataLoader timeout
        :param round_robin: visit datasets in order instead of sampling
        :param num_shards: equivalent with world size
        :param shard_id: equivalent with rank
        :param split_even: Split the datasets evenly (otherwise adding samples)
        :param dataset_ids: Selectively choose datasets involved
        """
        self.datasets = datasets
        self.data_iterators = list()
        for i, dataset in enumerate(datasets):
            # when dataset_ids is given, skip datasets that were not selected
            if dataset_ids is not None and len(dataset_ids) > 0:
                if i not in dataset_ids:
                    continue
            self.data_iterators.append(DataIterator(dataset, dataset.get_collater(), dataset.get_batches(), seed=seed,
                                                    num_workers=num_workers, epoch=epoch, buffer_size=buffer_size,
                                                    timeout=timeout, num_shards=num_shards,
                                                    shard_id=shard_id, split_even=split_even))
        self.shuffle = True
        self._cur_epoch_itr = None
        self._next_epoch_itr = None
        self._support_prefetch = False
        self.round_robin = round_robin
        self.epoch = max(epoch, 1)
        self.n_samples = sum([dataset.get_size() for dataset in self.datasets])

    def __len__(self):
        # total number of mini-batches across all wrapped iterators
        return sum([len(data_iterator) for data_iterator in self.data_iterators])

    @property
    def next_epoch_idx(self):
        """Return the epoch index after *next_epoch_itr* is called"""
        if self._next_epoch_itr is not None:
            return self.epoch
        elif self._cur_epoch_itr is not None and self.end_of_epoch():
            return self.epoch + 1
        else:
            return self.epoch

    def next_epoch_itr(self, shuffle=True, pin_memory=False):
        """Return a (possibly restored) merged iterator for the next epoch."""
        self.epoch = self.next_epoch_idx
        # Prefer an iterator pre-built by load_state_dict (resume case).
        if self._next_epoch_itr is not None:
            self._cur_epoch_itr = self._next_epoch_itr
            self._next_epoch_itr = None
        else:
            self._cur_epoch_itr = self._get_iterator_for_epoch(
                self.epoch, shuffle, pin_memory=pin_memory
            )
        for dataset in self.datasets:
            dataset.set_epoch(self.epoch)
        self.shuffle = shuffle
        return self._cur_epoch_itr

    def end_of_epoch(self) -> bool:
        """Returns whether the most recent epoch iterator has been exhausted."""
        return not self._cur_epoch_itr.has_next()

    @property
    def iterations_in_epoch(self):
        """ A list with the number of consumed batches per dataset in the current epoch"""
        if self._cur_epoch_itr is not None:
            return self._cur_epoch_itr.iterations_in_epoch()
        elif self._next_epoch_itr is not None:
            return self._next_epoch_itr.iterations_in_epoch()
        return [0] * len(self.data_iterators)

    def state_dict(self):
        """Returns a dictionary containing a whole state of the iterator."""
        return {
            'epoch': self.epoch,
            'iterations_in_epoch': self.iterations_in_epoch,
            'shuffle': self.shuffle,
        }

    def load_state_dict(self, state_dict):
        """Copies the state of the iterator from the given *state_dict*."""
        if state_dict is not None:
            self.epoch = state_dict['epoch']
            itr_pos = state_dict.get('iterations_in_epoch', [0] * len(self.data_iterators))
            if sum(itr_pos) > 0:
                # fast-forward epoch iterator
                self._next_epoch_itr = self._get_iterator_for_epoch(
                    self.epoch,
                    shuffle=state_dict.get('shuffle', True),
                    offsets=itr_pos
                )
                if self._next_epoch_itr is None:
                    # we finished the epoch, increment epoch counter
                    self.epoch += 1
            else:
                self._next_epoch_itr = None
        else:
            # nothing to restore: start over from epoch 1
            self.epoch = 1
            self._next_epoch_itr = None

    def _get_iterator_for_epoch(self, epoch, shuffle=False, offsets=None, pin_memory=False):
        """Build a MultiEpochIterator over per-dataset epoch iterators,
        fast-forwarded by *offsets* when restoring from a checkpoint.
        Returns None when the stored offsets already cover a whole epoch."""
        if offsets is not None and sum(offsets) >= self.n_samples:
            return None
        if offsets is None:
            offsets = [0] * len(self.data_iterators)
        epoch_iterators = list()
        # first, generate an iterator for each data iterator
        for (data_iterator, offset) in zip(self.data_iterators, offsets):
            epoch_iterator = data_iterator._get_iterator_for_epoch(epoch, shuffle, offset, pin_memory=pin_memory)
            epoch_iterators.append(epoch_iterator)
        # next, merge them into a single multi-epoch iterator
        return MultiEpochIterator(epoch_iterators, round_robin=self.round_robin)
NMTGMinor | NMTGMinor-master/onmt/data/scp_dataset.py | import torch
from kaldiio import load_mat
from functools import lru_cache
import numpy as np
from .audio_utils import _parse_arkpath, ArkLoader
import warnings
warnings.filterwarnings("ignore", message="The given NumPy array is not writeable ")
class SCPIndexDataset(torch.utils.data.Dataset):
    """
    This dataset simply stores a list of paths to ark matrices.
    __getitem__ uses the shared ArkLoader (kaldiio) to read a matrix on demand.

    Fix: the original class defined a second ``sizes`` property at the end of
    the file returning ``self._index.sizes`` — ``_index`` never exists, so any
    access raised AttributeError and shadowed the correct property above it.
    The broken duplicate has been removed.
    """

    def __init__(self, scp_path_list, concat=4, shared_object=None):
        """
        :param scp_path_list: list of paths to the ark matrices
        :param concat: stack this many consecutive frames into one vector
        :param shared_object: optional SCPIndexDataset whose ark reader is reused
        """
        self.scp_path_list = scp_path_list
        self._sizes = len(self.scp_path_list)
        self._dtype = torch.float32
        self.concat = concat
        if shared_object is not None:
            # share the (file-handle caching) reader across dataset copies
            self.reader = shared_object.reader
        else:
            self.reader = ArkLoader()

    @property
    def dtype(self):
        # dtype of the returned feature tensors
        return self._dtype

    @property
    def sizes(self):
        # number of utterances in the dataset
        return self._sizes

    def __len__(self):
        return self._sizes

    # NOTE: lru_cache on an instance method keys on `self` and keeps up to 8
    # recently-used (dataset, index) entries alive; kept to preserve the
    # original caching behavior for repeated accesses.
    @lru_cache(maxsize=8)
    def __getitem__(self, i):
        scp_path = self.scp_path_list[i]
        mat = self.reader.load_mat(scp_path)
        feature_vector = torch.from_numpy(mat)
        concat = self.concat
        if concat > 1:
            # zero-pad so the frame count divides evenly, then fold every
            # `concat` consecutive frames into a single wider feature vector
            add = (concat - feature_vector.size()[0] % concat) % concat
            z = torch.FloatTensor(add, feature_vector.size()[1]).zero_()
            feature_vector = torch.cat((feature_vector, z), 0)
            feature_vector = feature_vector.reshape((int(feature_vector.size()[0] / concat),
                                                     feature_vector.size()[1] * concat))
        return feature_vector
NMTGMinor | NMTGMinor-master/onmt/data/indexed_dataset.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import os
import struct
import numpy as np
import torch
import torch.utils.data
def read_longs(f, n):
    """Read ``n`` int64 values from binary stream ``f`` into a numpy array."""
    values = np.empty(n, dtype=np.int64)
    # fill the array buffer directly from the stream (no intermediate bytes)
    f.readinto(values)
    return values
def write_longs(f, a):
    """Write the values of ``a`` to binary stream ``f`` as raw int64."""
    f.write(np.asarray(a, dtype=np.int64).tobytes())
# Mapping from the on-disk type code (stored in the .idx header) to numpy dtype.
dtypes = {
    1: np.uint8,
    2: np.int8,
    3: np.int16,
    4: np.int32,
    5: np.int64,
    6: float,
    7: np.double,
}


def code(dtype):
    """Return the on-disk type code for ``dtype`` (inverse lookup of ``dtypes``).

    Returns None implicitly for an unknown dtype, matching historic behavior.
    """
    for type_code, np_dtype in dtypes.items():
        if np_dtype == dtype:
            return type_code
def index_file_path(prefix_path):
    """Path of the index (.idx) file belonging to ``prefix_path``."""
    return '{}.idx'.format(prefix_path)
def data_file_path(prefix_path):
    """Path of the data (.bin) file belonging to ``prefix_path``."""
    return '{}.bin'.format(prefix_path)
class IndexedDataset(torch.utils.data.Dataset):
    """Loader for TorchNet IndexedDataset.

    Eagerly parses the ``.idx`` header (magic, version, dtype code, element
    size, offsets and per-item sizes) and keeps the ``.bin`` data file open
    for random access in ``__getitem__``.
    """

    def __init__(self, path):
        super().__init__()
        with open(index_file_path(path), 'rb') as f:
            magic = f.read(8)
            assert magic == b'TNTIDX\x00\x00'
            version = f.read(8)
            assert struct.unpack('<Q', version) == (1,)
            # NOTE: this local `code` shadows the module-level code() helper
            code, self.element_size = struct.unpack('<QQ', f.read(16))
            self.dtype = dtypes[code]
            # size = number of items; s = total number of recorded dimensions
            self.size, self.s = struct.unpack('<QQ', f.read(16))
            self.dim_offsets = read_longs(f, self.size + 1)
            self.data_offsets = read_longs(f, self.size + 1)
            self.sizes = read_longs(f, self.s)
        self.read_data(path)

    def read_data(self, path):
        # unbuffered binary handle; subclasses may override to preload memory
        self.data_file = open(data_file_path(path), 'rb', buffering=0)

    def check_index(self, i):
        """Raise IndexError unless 0 <= i < number of items."""
        if i < 0 or i >= self.size:
            raise IndexError('index out of range')

    def __del__(self):
        # NOTE(review): raises AttributeError if __init__ failed before
        # read_data ran — benign but noisy during interpreter shutdown.
        self.data_file.close()

    def __getitem__(self, i):
        # Seek to the item's element offset and read its (possibly
        # multi-dimensional) tensor; always returned as a long tensor.
        self.check_index(i)
        tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
        a = np.empty(tensor_size, dtype=self.dtype)
        self.data_file.seek(self.data_offsets[i] * self.element_size)
        self.data_file.readinto(a)
        item = torch.from_numpy(a).long()
        return item

    def __len__(self):
        return self.size

    @staticmethod
    def exists(path):
        # True only when both the .idx and .bin files are present
        return (
            os.path.exists(index_file_path(path)) and
            os.path.exists(data_file_path(path))
        )
class IndexedInMemoryDataset(IndexedDataset):
    """IndexedDataset variant that slurps the entire .bin file into memory."""

    def read_data(self, path):
        # read the whole data file into one flat numpy buffer, then close it
        self.data_file = open(data_file_path(path), 'rb')
        self.buffer = np.empty(self.data_offsets[-1], dtype=self.dtype)
        self.data_file.readinto(self.buffer)
        self.data_file.close()

    def __del__(self):
        # nothing to release: the file handle is closed right after loading
        pass

    def __getitem__(self, i):
        self.check_index(i)
        shape = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
        item = np.empty(shape, dtype=self.dtype)
        # copy the item's slice out of the preloaded buffer
        np.copyto(item, self.buffer[self.data_offsets[i]:self.data_offsets[i + 1]])
        return torch.from_numpy(item).long()
class IndexedDatasetBuilder(object):
    """Incrementally writes tensors to a .bin file and emits the matching .idx index."""

    # bytes per element for each supported storage dtype
    element_sizes = {
        np.uint8: 1,
        np.int8: 1,
        np.int16: 2,
        np.int32: 4,
        np.int64: 8,
        float: 4,
        np.double: 8
    }

    def __init__(self, out_file, dtype=np.int32):
        """
        :param out_file: path of the .bin data file to create
        :param dtype: numpy dtype used to store the tensor elements
        """
        self.out_file = open(out_file, 'wb')
        self.dtype = dtype
        self.data_offsets = [0]  # cumulative element counts, one entry per item
        self.dim_offsets = [0]   # cumulative number of dims, one entry per item
        self.sizes = []          # flattened per-dimension sizes of all items
        self.element_size = self.element_sizes[self.dtype]

    def add_item(self, tensor):
        """Append one tensor to the data file and record its offsets/sizes."""
        # BUGFIX: use integer division — offsets are element counts and must
        # stay ints (the old `bytes / element_size` produced floats, which can
        # lose precision on very large files). Also avoid shadowing `bytes`.
        n_bytes = self.out_file.write(np.array(tensor.numpy(), dtype=self.dtype))
        self.data_offsets.append(self.data_offsets[-1] + n_bytes // self.element_size)
        for s in tensor.size():
            self.sizes.append(s)
        self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))

    def finalize(self, index_file):
        """Close the data file and write the .idx header, offsets and sizes."""
        self.out_file.close()
        index = open(index_file, 'wb')
        index.write(b'TNTIDX\x00\x00')
        index.write(struct.pack('<Q', 1))
        index.write(struct.pack('<QQ', code(self.dtype), self.element_size))
        index.write(struct.pack('<QQ', len(self.data_offsets) - 1, len(self.sizes)))
        write_longs(index, self.dim_offsets)
        write_longs(index, self.data_offsets)
        write_longs(index, self.sizes)
        index.close()
NMTGMinor | NMTGMinor-master/onmt/data/audio_utils.py | import numpy as np
from contextlib import contextmanager
import io
from io import TextIOBase
import os
import subprocess
import sys
import warnings
from functools import partial
from io import BytesIO
from io import StringIO
import re
import struct
import sys
import warnings
import soundfile
import math
import torch
from .kaldiio.compression_header import GlobalHeader
from .kaldiio.compression_header import PerColHeader
from .kaldiio.utils import default_encoding
from .kaldiio.utils import LazyLoader
from .kaldiio.utils import MultiFileDescriptor
from .kaldiio.utils import open_like_kaldi
from .kaldiio.utils import open_or_fd
from .kaldiio.utils import seekable
from .kaldiio.wavio import read_wav
from .kaldiio.wavio import write_wav
# Python 2/3 compatibility shims: pick the correct location of Mapping and
# define the interpreter's native binary / text string types.
PY3 = sys.version_info[0] == 3

if PY3:
    from collections.abc import Mapping
    binary_type = bytes
    string_types = str,
else:
    from collections import Mapping
    binary_type = str
    string_types = basestring,  # noqa: F821
# load scp function
# audio downsampling function
def _parse_arkpath(ark_name):
"""Parse arkpath
Args:
ark_name (str):
Returns:
Tuple[str, int, Optional[Tuple[slice, ...]]]
Examples:
>>> _parse_arkpath('a.ark')
'a.ark', None, None
>>> _parse_arkpath('a.ark:12')
'a.ark', 12, None
>>> _parse_arkpath('a.ark:12[3:4]')
'a.ark', 12, (slice(3, 4, None),)
>>> _parse_arkpath('cat "fo:o.ark" |')
'cat "fo:o.ark" |', None, None
"""
if ark_name.rstrip()[-1] == '|' or ark_name.rstrip()[0] == '|':
# Something like: "| cat foo" or "cat bar|" shouldn't be parsed
return ark_name, None, None
slices = None
if ':' in ark_name:
fname, offset = ark_name.split(':', 1)
if '[' in offset and ']' in offset:
offset, Range = offset.split('[')
# Range = [3:6, 10:30]
Range = Range.replace(']', '').strip()
slices = _convert_to_slice(Range)
offset = int(offset)
else:
fname = ark_name
offset = None
return fname, offset, slices
def read_int32vector(fd, endian='<', return_size=False):
    """Read a kaldi binary int32 vector from ``fd``.

    Layout: b'\\0B' magic, a b'\\4' marker + int32 length, then one
    b'\\4' marker + int32 per element.

    :param fd: binary file object positioned at the vector start
    :param endian: '<' or '>'
    :param return_size: also return the number of bytes consumed
    """
    assert fd.read(2) == b'\0B'
    assert fd.read(1) == b'\4'
    int_fmt = endian + 'i'
    length = struct.unpack(int_fmt, fd.read(4))[0]
    values = np.empty(length, dtype=np.int32)
    for idx in range(length):
        assert fd.read(1) == b'\4'
        values[idx] = struct.unpack(int_fmt, fd.read(4))[0]
    if not return_size:
        return values
    # 2 magic bytes + (length + 1) * (1 marker byte + 4 payload bytes)
    return values, (length + 1) * 5 + 2
def read_matrix_or_vector(fd, endian='<', return_size=False):
    """Call from load_kaldi_file

    Reads one kaldi binary matrix/vector starting at the b'\\0B' marker.
    Dispatches on the type token: compressed formats ('CM', 'CM2', 'CM3')
    are decompressed via GlobalHeader/PerColHeader, uncompressed formats
    ('FM', 'FV', 'HM', 'DM', 'DV') are read directly.

    Args:
        fd (file): binary file object
        endian (str): '<' or '>'
        return_size (bool): also return the number of bytes consumed
    """
    size = 0
    assert fd.read(2) == b'\0B'
    size += 2

    # the type token determines the storage format
    Type = str(read_token(fd))
    size += len(Type) + 1

    # CompressedMatrix (per-column compression, stored column-major as uint8)
    if 'CM' == Type:
        # Read GlobalHeader
        global_header = GlobalHeader.read(fd, Type, endian)
        size += global_header.size
        per_col_header = PerColHeader.read(fd, global_header)
        size += per_col_header.size

        # Read data
        buf = fd.read(global_header.rows * global_header.cols)
        size += global_header.rows * global_header.cols
        array = np.frombuffer(buf, dtype=np.dtype(endian + 'u1'))
        array = array.reshape((global_header.cols, global_header.rows))

        # Decompress
        array = per_col_header.char_to_float(array)
        array = array.T

    elif 'CM2' == Type:
        # Read GlobalHeader
        global_header = GlobalHeader.read(fd, Type, endian)
        size += global_header.size

        # Read matrix (uint16 per cell)
        buf = fd.read(2 * global_header.rows * global_header.cols)
        # BUGFIX: the payload bytes were not counted before, so the reported
        # bytes-consumed undercounted and broke sequential ark scanning.
        size += 2 * global_header.rows * global_header.cols
        array = np.frombuffer(buf, dtype=np.dtype(endian + 'u2'))
        array = array.reshape((global_header.rows, global_header.cols))

        # Decompress
        array = global_header.uint_to_float(array)

    elif 'CM3' == Type:
        # Read GlobalHeader
        global_header = GlobalHeader.read(fd, Type, endian)
        size += global_header.size

        # Read matrix (uint8 per cell)
        buf = fd.read(global_header.rows * global_header.cols)
        # BUGFIX: count payload bytes (same undercount as the CM2 branch).
        size += global_header.rows * global_header.cols
        array = np.frombuffer(buf, dtype=np.dtype(endian + 'u1'))
        array = array.reshape((global_header.rows, global_header.cols))

        # Decompress
        array = global_header.uint_to_float(array)

    else:
        # Uncompressed: F=float32, H=float16, D=float64; M=matrix, V=vector
        if Type == 'FM' or Type == 'FV':
            dtype = endian + 'f'
            bytes_per_sample = 4
        elif Type == 'HM':
            dtype = endian + 'e'
            bytes_per_sample = 2
        elif Type == 'DM' or Type == 'DV':
            dtype = endian + 'd'
            bytes_per_sample = 8
        else:
            raise ValueError(
                'Unexpected format: "{}". Now FM, FV, DM, DV, '
                'CM, CM2, CM3 are supported.'.format(Type))

        assert fd.read(1) == b'\4'
        size += 1
        rows = struct.unpack(endian + 'i', fd.read(4))[0]
        size += 4
        dim = rows
        if 'M' in Type:  # As matrix
            assert fd.read(1) == b'\4'
            size += 1
            cols = struct.unpack(endian + 'i', fd.read(4))[0]
            size += 4
            dim = rows * cols

        buf = fd.read(dim * bytes_per_sample)
        size += dim * bytes_per_sample
        array = np.frombuffer(buf, dtype=np.dtype(dtype))

        if 'M' in Type:  # As matrix
            array = np.reshape(array, (rows, cols))

    if return_size:
        return array, size
    else:
        return array
def read_ascii_mat(fd, return_size=False):
    """Call from load_kaldi_file

    Parses a kaldi ascii matrix/vector: an optional '[' ... ']' bracketed
    block of whitespace-separated numbers. If the first element contains a
    '.', a float32 array is produced, otherwise int32.

    Args:
        fd (file): binary mode
        return_size (bool): also return the number of bytes consumed
    """
    string = []
    size = 0

    # Find '[' char
    while True:
        b = fd.read(1)
        try:
            char = b.decode(encoding=default_encoding)
        except UnicodeDecodeError:
            raise ValueError('File format is wrong?')
        size += 1
        if char == ' ' or char == '\n':
            continue
        elif char == '[':
            hasparent = True
            break
        else:
            # no opening bracket: this char already belongs to the data
            string.append(char)
            hasparent = False
            break

    # Read data until the closing ']' (bracketed) or newline/EOF (bare)
    ndmin = 1
    while True:
        char = fd.read(1).decode(encoding=default_encoding)
        size += 1
        if hasparent:
            if char == ']':
                char = fd.read(1).decode(encoding=default_encoding)
                size += 1
                assert char == '\n' or char == ''
                break
            elif char == '\n':
                # a newline inside brackets marks a 2-D matrix
                ndmin = 2
            elif char == '':
                raise ValueError(
                    'There are no corresponding bracket \']\' with \'[\'')
        else:
            if char == '\n' or char == '':
                break
        string.append(char)
    string = ''.join(string)
    assert len(string) != 0

    # Examine dtype (from the first whitespace-separated element)
    match = re.match(r' *([^ \n]+) *', string)
    if match is None:
        dtype = np.float32
    else:
        ma = match.group(0)
        # If first element is integer, deal as interger array
        try:
            float(ma)
        except ValueError:
            raise RuntimeError(
                ma + 'is not a digit\nFile format is wrong?')
        if '.' in ma:
            dtype = np.float32
        else:
            dtype = np.int32
    array = np.loadtxt(StringIO(string), dtype=dtype, ndmin=ndmin)
    if return_size:
        return array, size
    else:
        return array
def read_token(fd):
    """Read a space-terminated kaldi token from binary stream ``fd``.

    Returns None when no bytes precede the terminator (i.e. end of file).
    """
    chars = []
    while True:
        byte = fd.read(1)
        if byte in (b' ', b''):
            break
        chars.append(byte)

    if not chars:  # End of file
        return None

    return b''.join(chars).decode(encoding=default_encoding)
def read_kaldi(fd, endian='<', return_size=False):
    """Load a kaldi object by peeking at the first bytes of ``fd``.

    Recognizes RIFF wave files, kaldi binary vectors/matrices (b'\\0B'
    prefix) and falls back to ascii matrices otherwise.

    Args:
        fd (file): binary mode file object (not a path)
        endian (str): '<' or '>'
        return_size (bool): also return the number of bytes consumed
    """
    assert endian in ('<', '>'), endian
    binary_flag = fd.read(4)
    assert isinstance(binary_flag, binary_type), type(binary_flag)

    # Put the 4 peeked bytes back: rewind when seekable, otherwise chain
    # them in front of the remaining stream.
    if seekable(fd):
        fd.seek(-4, 1)
    else:
        fd = MultiFileDescriptor(BytesIO(binary_flag), fd)

    if binary_flag[:4] == b'RIFF':
        # wave file -> Tuple[int, np.ndarray]
        array, size = read_wav(fd, return_size=True)
    elif binary_flag[:2] == b'\0B':
        # kaldi binary container
        if binary_flag[2:3] == b'\4':  # int32 vector
            array, size = read_int32vector(fd, endian, return_size=True)
        else:
            array, size = read_matrix_or_vector(fd, endian, return_size=True)
    else:
        # ascii matrix fallback
        array, size = read_ascii_mat(fd, return_size=True)

    if return_size:
        return array, size
    return array
class ArkLoader(object):
    """Reads kaldi matrices from ark files, caching open file descriptors.

    With ``fastest=True`` (default) every distinct ark file stays open in
    ``self.readers`` until close() is called. NOTE(review): that cache is
    unbounded, so reading from very many ark files keeps that many file
    descriptors open. With ``fastest=False`` only the most recently used
    ark is kept open.
    """

    def __init__(self, fastest=True):
        self.current_ark = None   # path of the single cached ark (slow mode)
        self.reader = None        # its file descriptor (slow mode)
        self.readers = dict()     # ark path -> open fd cache (fast mode)
        self.fastest = fastest

    def load_mat(self, ark_name, endian='<', as_bytes=False):
        """Load one matrix given an 'ark_path:offset[slices]' specifier."""
        assert endian in ('<', '>'), endian
        ark, offset, slices = _parse_arkpath(ark_name)

        if not self.fastest:
            # keep a single ark open: reopen only when the target changes
            if self.current_ark != ark:
                if self.reader is not None:
                    self.reader.close()
                self.reader = open_like_kaldi(ark, 'rb')
                self.current_ark = ark

            return self.read_mat(self.reader, offset, slices, endian=endian, as_bytes=as_bytes)
        else:
            if ark not in self.readers:
                self.readers[ark] = open_like_kaldi(ark, 'rb')

            fd = self.readers[ark]
            return self.read_mat(fd, offset, slices, endian=endian, as_bytes=as_bytes)

    def read_mat(self, fd, offset, slices, endian='<', as_bytes=False):
        """Seek to ``offset`` and read one matrix (or raw bytes)."""
        if offset is not None:
            fd.seek(offset)
        if not as_bytes:
            array = read_kaldi(fd, endian)
        else:
            # NOTE(review): reads the remainder of the file, not one entry
            array = fd.read()

        if slices is not None:
            if isinstance(array, (tuple, list)):
                # wav results are (rate, data): slice only the data part
                array = (array[0], array[1][slices])
            else:
                array = array[slices]
        return array

    def close(self):
        """Close every cached file descriptor (both modes)."""
        if self.reader is not None:
            self.reader.close()

        for k in self.readers:
            self.readers[k].close()
def safe_readaudio_from_cache(file_, start=0.0, end=0.0, sample_rate=16000):
    """Read the [start, end] segment (in seconds) from an open soundfile handle.

    Returns a (num_samples, 1) float32 tensor containing the first channel.
    NOTE(review): relies on soundfile's private ``_prepare_read`` API to seek
    without reopening the file — verify against the installed soundfile
    version. Also assumes ``sample_rate`` matches ``file_.samplerate``;
    the handle's actual rate is read but never checked.
    """
    offset = math.floor(sample_rate * start)
    # -1 means "read to the end" when the requested range is empty/inverted
    num_frames = -1 if end <= start else math.ceil(sample_rate * (end - start))

    dtype = "float32"
    frames = file_._prepare_read(offset, None, num_frames)
    waveform = file_.read(frames, dtype, always_2d=True)
    sample_rate_ = file_.samplerate  # NOTE(review): read but unused

    tensor = torch.from_numpy(waveform)
    # keep only the first channel, shaped as a column vector
    tensor = tensor[:, 0].unsqueeze(1)

    return tensor
class WavLoader(object):
    """Reads wav segments via soundfile, keeping a usage-based cache of open handles."""

    def __init__(self, cache_size=512):
        """
        :param cache_size: max number of simultaneously open files;
                           <= 0 disables caching entirely
        """
        if cache_size > 0:
            self.cache = dict()   # wav_path -> open soundfile.SoundFile
            self.usage = dict()   # wav_path -> access count (eviction heuristic)
        else:
            # BUGFIX: `usage` used to be left undefined here, and load_wav /
            # close crashed on `self.cache` being None. The no-cache path is
            # now handled explicitly below.
            self.cache = None
            self.usage = None
        self.cache_size = cache_size

    def load_wav(self, wav_path, start, end, sample_rate=16000):
        """Return the [start, end] (seconds) segment of wav_path as a tensor."""
        if self.cache is None:
            # caching disabled: open, read, close
            file_ = soundfile.SoundFile(wav_path, 'r')
            try:
                return safe_readaudio_from_cache(file_, start, end, sample_rate)
            finally:
                file_.close()

        # take the object from the cache if it exists
        if wav_path in self.cache:
            file_ = self.cache[wav_path]
            self.usage[wav_path] = self.usage[wav_path] + 1
        else:
            # read the audio file
            file_ = soundfile.SoundFile(wav_path, 'r')
            if len(self.cache) > self.cache_size:
                # evict the least-used entry (but never the file just opened)
                min_key = min(self.usage, key=self.usage.get)
                if min_key != wav_path:
                    self.cache[min_key].close()
                    self.cache.pop(min_key, None)
                    self.usage.pop(min_key, None)

            # add the object to the cache
            self.cache[wav_path] = file_
            self.usage[wav_path] = 1

        return safe_readaudio_from_cache(file_, start, end, sample_rate)

    def close(self):
        """Close every cached file handle."""
        if self.cache is None:
            return
        for wav_path in self.cache:
            self.cache[wav_path].close()
NMTGMinor | NMTGMinor-master/onmt/data/lm_dataset.py | from __future__ import division
import math
import torch
import torch.utils.data
from collections import defaultdict
import onmt
from onmt.data.dataset import Dataset
class LanguageModelBatch(object):
    """Minibatch container for language-model training.

    Holds the input stream, the shifted target stream and the language ids,
    exposed through the same get()/cuda() interface as translation batches.
    """

    def __init__(self, data, target, lang, **kwargs):
        self.data = data
        self.target = target
        self.lang = lang

        self.tensors = defaultdict(lambda: None)
        self.tensors['target_input'] = data
        self.tensors['target_output'] = target
        self.tensors['target_lang'] = lang

        self.tgt_size = target.numel()
        self.src_size = 0  # language-model batches have no source side
        self.size = target.size(1)

    def get(self, name):
        """Return the named tensor, or None when absent."""
        return self.tensors[name] if name in self.tensors else None

    def cuda(self, fp16=False):
        """
        Send the minibatch data into GPU. Old-fashioned without the 'device' control
        :param fp16:
        :return: None
        """
        for key, value in self.tensors.items():
            if isinstance(value, dict):
                # nested dicts are moved entry by entry
                for sub_key, sub_value in value.items():
                    value[sub_key] = sub_value.cuda()
            elif value is not None:
                # halve float tensors first when fp16 is requested
                if fp16 and value.type() == "torch.FloatTensor":
                    self.tensors[key] = value.half()
                self.tensors[key] = self.tensors[key].cuda()
class LanguageModelDataset(Dataset):
    """Dataset that concatenates corpora into one token stream for LM training.

    The stream is reshaped into ``batch_size_sents`` parallel columns and cut
    into BPTT windows of ``seq_length`` tokens; each window yields an
    (input, shifted target, language-id) triple.

    NOTE(review): super().__init__() is never called — verify the ``Dataset``
    base class needs no initialization.
    """

    def __init__(self, data, langs, batch_size_sents=128, batch_size_words=9999,
                 seq_length=64, **kwargs):

        # concatenate all sentences in the data to get a stream
        if len(langs) <= 1:
            self.single_language = True
        else:
            self.single_language = False

        if not self.single_language:
            # NOTE(review): torch.Tensor([data[i].size(0)]) builds a ONE-element
            # tensor (filled with the lang id), not one entry per token —
            # slicing self.langs by token positions below looks inconsistent;
            # confirm intent (possibly torch.Tensor(data[i].size(0)) was meant).
            self.langs = [torch.Tensor([data[i].size(0)]).fill_(langs[i]) for i in range(len(langs))]
        else:
            self.langs = langs
        self.langs = torch.cat(self.langs, dim=0).long()

        self.data = torch.cat(data, dim=0).long()
        self.batch_size_sents = batch_size_sents
        self.batch_size_words = batch_size_words
        self.seq_length = seq_length
        self.bptt = seq_length
        full_length = sum([x.size(0) for x in data])  # NOTE(review): unused

        # group samples into mini batches
        self.num_batches = 0
        self.batches = []
        self.allocate_batch()
        self.fullSize = self.num_batches
        self.cur_index = 0
        self.batchOrder = None

    def allocate_batch(self):
        """Reshape the stream into columns and pre-slice it into BPTT windows."""
        # trim the stream so it divides evenly into batch_size_sents columns
        self.n_step = self.data.size(0) // self.batch_size_sents
        self.data = self.data.narrow(0, 0, self.n_step * self.batch_size_sents)
        # Evenly divide the data across the bsz batches.
        self.data = self.data.view(self.batch_size_sents, -1).t().contiguous()

        self.batches = []
        for i in range(0, self.data.size(0) - 1, self.bptt):
            bptt = self.seq_length
            seq_len = min(bptt, self.data.size(0) - 1 - i)
            end_idx = i + seq_len
            beg_idx = max(0, i)
            data = self.data[beg_idx:end_idx]
            # target is the input shifted by one position
            target = self.data[i + 1:i + 1 + seq_len]
            if self.single_language:
                lang = self.langs
            else:
                lang = self.langs[beg_idx:end_idx]
            self.batches.append((data, target, lang))
        self.num_batches = len(self.batches)

    # genereate a new batch - order (static)
    def create_order(self, random=False):
        # For language model order shouldn't be random
        self.batchOrder = torch.arange(self.num_batches).long()
        self.cur_index = 0
        return self.batchOrder

    # return the next batch according to the iterator
    # for language model
    def next(self, curriculum=True, reset=True, split_sizes=1):
        """Return the next [LanguageModelBatch], wrapping around when reset."""
        # reset iterator if reach data size limit
        if self.cur_index >= self.num_batches:
            if reset:
                self.cur_index = 0
            else:
                return None

        data, target, lang = self.batches[self.cur_index]
        batch = LanguageModelBatch(data, target, lang)

        self.cur_index += 1

        return [batch]
| 4,860 | 28.822086 | 108 | py |
NMTGMinor | NMTGMinor-master/onmt/train_utils/classify_trainer.py | from __future__ import division
import datetime
import gc
import inspect_model
import math
import os
import re
import time
import torch
import copy
import sys
import contextlib
import onmt
import onmt.markdown
import onmt.modules
from onmt.data.data_iterator import DataIterator
from onmt.data.multidata_iterator import MultiDataIterator
from onmt.data.dataset import rewrap
from onmt.model_factory import build_classifier, optimize_model, init_model_parameters
from onmt.model_factory import init_model_parameters
from onmt.modules.loss import ClassifierLoss
from onmt.train_utils.stats import Logger
from onmt.utils import checkpoint_paths, normalize_gradients
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP_model
from torch.cuda.amp import autocast
import warnings
# ignore the pytorch -> numpy conversion warnings
warnings.filterwarnings("ignore", category=UserWarning)
def prepare_sample(batch, device=None):
    """Unwrap a collated sample and move it onto the given GPU.

    :param batch: a Batch object, or a single-element list containing one
    :param device: target GPU id, forwarded to ``Batch.cuda``
    :return: the rewrapped batch, already resident on the device
    """
    sample = batch[0] if isinstance(batch, list) else batch
    sample = rewrap(sample)
    sample.cuda(fp16=False, device=device)

    return sample
def generate_data_iterator(dataset, rank, world_size, seed,
                           num_workers=1, epoch=1., buffer_size=0):
    """Create a sharded (Multi)DataIterator for ``dataset``.

    A list of datasets yields a MultiDataIterator; a single dataset yields
    a plain DataIterator.
    """
    shared_kwargs = dict(seed=seed, num_workers=num_workers, epoch=epoch,
                         buffer_size=buffer_size, num_shards=world_size,
                         shard_id=rank)

    if isinstance(dataset, list):
        # multiple datasets -> merged multi-dataset iterator
        return MultiDataIterator(dataset, **shared_kwargs)

    return DataIterator(dataset, dataset.collater, dataset.batches, **shared_kwargs)
def zero_tensor(device=None):
    """Return a one-element zero tensor on ``device`` (default: current GPU)."""
    zero = torch.Tensor([0])
    return zero.cuda() if device is None else zero.to(device)
class ClassifierTrainer(object):
    def __init__(self, device, train_data, valid_data, dicts, opt, setup_optimizer=True):
        """
        Build the classifier model, loss, grad scaler and (optionally) the
        optimizer; wraps the model in DistributedDataParallel when more than
        one GPU is configured.

        :param device: int (GPU id); doubles as the process rank
        :param train_data: training dataset (or list of datasets)
        :param valid_data: validation dataset
        :param dicts: vocabularies ('tgt' used for the number of classes)
        :param opt: training options namespace
        :param setup_optimizer: skip optimizer creation when False
        """
        self.device = device
        opt.node_rank = 0
        opt.nodes = 1
        self.world_size = len(opt.gpus)

        # in the case of single node distributed, it should equal self.device
        self.rank = self.device

        # make a group to later use with self.all_reduce
        self.group = dist.group.WORLD

        self.print("[INFO] Training Options:", opt)
        if self.world_size > 1:
            dist.init_process_group(backend='nccl', init_method='env://', world_size=self.world_size, rank=self.rank)

        self.model = None

        if self.rank == 0:
            self.train_data = train_data
            self.valid_data = valid_data
        else:
            # Do we really need to deepcopy the data instances (which could cause memory leak easily)
            self.train_data = copy.deepcopy(train_data)
            self.valid_data = copy.deepcopy(valid_data)

        self.dicts = dicts
        self.opt = opt
        self.cuda = (len(opt.gpus) >= 1 and opt.gpus[0] >= 0)

        assert self.cuda, "[ERROR] Training is only available on GPUs."

        self.start_time = 0

        # setting up models and others
        torch.manual_seed(self.opt.seed)

        if self.is_main():
            print("[INFO] Building models .... ", flush=True)
        model = build_classifier(opt, dicts)

        # number of classes comes from the target vocabulary size
        loss_function = ClassifierLoss(opt.model_size, dicts['tgt'].size(), label_smoothing=opt.label_smoothing)

        # This function replaces modules with the more optimized counterparts so that it can run faster
        # Currently exp with LayerNorm
        # distributed is required to convert BatchNorm to SyncBatchNorm for DDP
        optimize_model(model, distributed=(self.world_size > 1))

        # pretrained wav2vec2 weights must not be re-initialized
        if 'wav2vec2' not in opt.model:
            init_model_parameters(model, opt)

        self.model = model
        self.loss_function = loss_function
        self.grad_scaler = torch.cuda.amp.GradScaler()

        if opt.mpc:
            from onmt.modules.loss import MPCLoss
            self.mpc_loss = MPCLoss()

        if opt.load_from:
            checkpoint = torch.load(opt.load_from, map_location=lambda storage, loc: storage)
            self.model.load_state_dict(checkpoint['model'])
            if 'scaler' in checkpoint and checkpoint['scaler'] is not None:
                self.grad_scaler.load_state_dict(checkpoint['scaler'])

        if self.cuda:
            torch.cuda.set_device(self.device)
            self.model = self.model.cuda(device=self.device)

        if setup_optimizer:

            self.optim = onmt.Optim(opt)
            self.optim.set_parameters(self.model.parameters())

            if self.is_main():
                print("[INFO] Optimizer: ", self.optim.optimizer)

            # restore optimizer state from the checkpoint unless reset requested
            if opt.load_from:
                if 'optim' in checkpoint and checkpoint['optim'] is not None and not opt.reset_optim:
                    self.optim.load_state_dict(checkpoint['optim'])

        if self.world_size > 1:
            # find_unused_parameters may be required for dropped layer (parameters that are not connected to
            # any particular graph)
            find_unused_parameters = False if opt.death_rate == 0.0 else True

            self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[self.rank],
                                                                   output_device=self.rank,
                                                                   find_unused_parameters=find_unused_parameters)

        print("[INFO] Process %d ready." % self.rank, flush=True)
def is_main(self):
return self.rank == 0
def all_reduce(self, tensor, **kwargs):
if self.world_size > 1:
dist.all_reduce(tensor, **kwargs)
# otherwise, do nothing
return
def print(self, *content, flush=False):
"""
A helper function to print only on the main process
:param flush:
:param content:
:return:
"""
if self.is_main():
print(*content, flush=flush)
else:
return
# def load_encoder_weight(self, checkpoint_file):
#
# print("Loading pretrained Encoder Weights from %s" % checkpoint_file, flush=True)
# checkpoint = torch.load(checkpoint_file, map_location=lambda storage, loc: storage)
#
# pretrained_model = build_model(checkpoint['opt'], checkpoint['dicts'])
# pretrained_model.load_state_dict(checkpoint['model'])
#
# model = self.model.module if self.world_size > 1 else self.model
#
# model.load_encoder_weights(pretrained_model)
#
# return
#
# def load_decoder_weight(self, checkpoint_file):
#
# self.print("Loading pretrained models from %s" % checkpoint_file)
# checkpoint = torch.load(checkpoint_file, map_location=lambda storage, loc: storage)
# chkpoint_dict = checkpoint['dicts']
#
# pretrained_model = build_model(checkpoint['opt'], chkpoint_dict)
# pretrained_model.load_state_dict(checkpoint['model'])
#
# self.print("Loading pretrained decoder weights ...")
# # first we have to remove the embeddings which probably have difference size ...
# pretrained_word_emb = pretrained_model.decoder.word_lut
# pretrained_model.decoder.word_lut = None
# pretrained_lang_emb = pretrained_model.decoder.language_embeddings
# pretrained_model.decoder.language_embeddings = None
#
# # actually we assume that two decoders have the same language embeddings...
# untrained_word_emb = self.model.decoder.word_lut
# self.model.decoder.word_lut = None
# untrained_lang_emb = self.model.decoder.language_embeddings
# self.model.decoder.language_embeddings = None
#
# decoder_state_dict = pretrained_model.decoder.state_dict()
# self.model.decoder.load_state_dict(decoder_state_dict)
#
# # now we load the embeddings ....
# n_copies = 0
# for token in self.dicts['tgt'].labelToIdx:
#
# untrained_id = self.dicts['tgt'].labelToIdx[token]
#
# if token in chkpoint_dict['tgt'].labelToIdx:
# pretrained_id = chkpoint_dict['tgt'].labelToIdx[token]
# untrained_word_emb.weight.data[untrained_id].copy_(pretrained_word_emb.weight.data[pretrained_id])
#
# self.model.generator[0].linear.bias.data[untrained_id].copy_(pretrained_model
# .generator[0].linear.bias.data[
# pretrained_id])
# n_copies += 1
#
# self.print("Copied embedding for %d words" % n_copies)
# self.model.decoder.word_lut = untrained_word_emb
#
# # now we load the language embeddings ...
# if pretrained_lang_emb and untrained_lang_emb and 'langs' in chkpoint_dict:
# for lang in self.dicts['langs']:
#
# untrained_id = self.dicts['langs'][lang]
# if lang in chkpoint_dict['langs']:
# pretrained_id = chkpoint_dict['langs'][lang]
# untrained_lang_emb.weight.data[untrained_id].copy_(pretrained_lang_emb.weight.data[pretrained_id])
#
# self.model.decoder.language_embeddings = untrained_lang_emb
def warm_up(self):
return
# """
# Warmup the memory allocator, by attempting to fit the largest batch
# :return:
# """
#
# # if self.opt.memory_profiling:
# # from pytorch_memlab import MemReporter
# # reporter = MemReporter()
# #
# batch = self.train_data[0].get_largest_batch() if isinstance(self.train_data, list) \
# else self.train_data.get_largest_batch()
# opt = self.opt
#
# if self.cuda:
# batch.cuda(fp16=False)
#
# self.model.train()
# self.loss_function.train()
# self.model.zero_grad()
# oom = False
#
# if self.opt.memory_profiling:
# self.print("Input size: ")
# self.print(batch.size, batch.src_size, batch.tgt_size)
#
# if opt.streaming:
# streaming_state = self.model.init_stream()
# else:
# streaming_state = None
#
# try:
# with autocast():
# targets = batch.get('target_output')
# tgt_mask = None
# outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
# zero_encoder=opt.zero_encoder,
# mirror=opt.mirror_loss, streaming_state=streaming_state,
# nce=opt.nce)
#
# outputs['tgt_mask'] = tgt_mask
#
# loss_dict = self.loss_function(outputs, targets, model=self.model)
# loss_data = loss_dict['data']
# loss = loss_dict['loss'] # a little trick to avoid gradient overflow with fp16
# full_loss = loss
#
# if opt.ctc_loss > 0.0:
# ctc_loss = self.ctc_loss_function(outputs, targets)
# ctc_loss_data = ctc_loss.item()
# full_loss = full_loss + opt.ctc_loss * ctc_loss
#
# if opt.mirror_loss:
# rev_loss = loss_dict['rev_loss']
# mirror_loss = loss_dict['mirror_loss']
# full_loss = full_loss + rev_loss + mirror_loss
#
# # reconstruction loss
# if opt.reconstruct:
# rec_loss = loss_dict['rec_loss']
# rec_loss = rec_loss
# full_loss = full_loss + rec_loss
#
# if opt.lfv_multilingual:
# lid_logits = outputs['lid_logits']
# lid_labels = batch.get('target_lang')
# lid_loss_function = self.loss_function.get_loss_function('lid_loss')
# lid_loss = lid_loss_function(lid_logits, lid_labels)
# full_loss = full_loss + lid_loss
#
# optimizer = self.optim.optimizer
#
# if self.opt.memory_profiling:
# reporter.report(verbose=True)
#
# # for obj in gc.get_objects():
# # try:
# # if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
# # # print(varname(obj))
# # # we can rule out parameter cost later
# # # if 'parameter' not in type(obj):
# # # if len(obj.shape) == 3:
# # # if not isinstance(obj, torch.nn.parameter.Parameter):
# # # tensor = obj
# # # numel = tensor.
# # print(type(obj), obj.type(), obj.size())
# # except:
# # pass
#
# # print("Memory profiling complete.")
# # print(torch.cuda.memory_summary())
# # exit()
#
# self.grad_scaler.scale(full_loss).backward()
# # if self.cuda:
# # with amp.scale_loss(full_loss, optimizer) as scaled_loss:
# # scaled_loss.backward()
# # else:
# # loss.div_(batch.tgt_size).backward()
#
# if self.opt.memory_profiling:
# print('========= after backward =========')
# reporter.report(verbose=True)
#
# self.model.zero_grad()
# self.optim.zero_grad()
# # self.optim.step()
# # self.optim.reset()
#
# except RuntimeError as e:
# if 'out of memory' in str(e):
# oom = True
# else:
# raise e
#
# if oom:
# print("[INFO] Warning: out-of-memory in warming up. "
# "This is due to the largest batch is too big for the GPU.",
# flush=True)
# else:
# self.print("[INFO] Warming up successfully.", flush=True)
#
# if self.opt.memory_profiling:
# if hasattr(torch.cuda, 'memory_summary'):
# print(torch.cuda.memory_summary())
# exit()
# maybe save by accuracy?
    def save(self, epoch, valid_ppl, itr=None):
        """Write a training checkpoint and prune old ones.

        The checkpoint bundles model/optimizer/scaler state, the dicts and
        options, and the iterator state for resuming. After writing, only
        the newest ``opt.keep_save_files`` checkpoints are kept.

        :param epoch: (possibly fractional) epoch number, encoded in the filename
        :param valid_ppl: validation perplexity, encoded in the filename
        :param itr: data iterator whose state should be saved for resume
        """
        opt = self.opt
        model = self.model
        dicts = self.dicts

        # unwrap the DDP container so the raw module weights are saved
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            model_state_dict = self.model.module.state_dict()
        else:
            model_state_dict = self.model.state_dict()

        optim_state_dict = self.optim.state_dict()

        if itr:
            itr_state_dict = itr.state_dict()
        else:
            itr_state_dict = None

        #  drop a checkpoint
        checkpoint = {
            'model': model_state_dict,
            'dicts': dicts,
            'opt': opt,
            'epoch': epoch,
            'itr': itr_state_dict,
            'optim': optim_state_dict,
            'scaler': self.grad_scaler.state_dict()
        }

        file_name = '%s_ppl_%.6f_e%.2f.pt' % (opt.save_model, valid_ppl, epoch)
        print('Writing to %s' % file_name)
        torch.save(checkpoint, file_name)

        # check the save directory here and remove checkpoints beyond the keep limit
        checkpoint_dir = os.path.dirname(opt.save_model)
        existed_save_files = checkpoint_paths(checkpoint_dir)
        for save_file in existed_save_files[opt.keep_save_files:]:
            print(" * Deleting old save file %s ...." % save_file)
            os.remove(save_file)
    def eval(self, data):
        """Run evaluation over ``data`` and return averaged loss and accuracy.

        Iterates one epoch under no_grad/autocast, accumulates loss, token
        count and correct predictions on the GPU, then all-reduces the
        totals across ranks.

        :param data: validation dataset (or list of datasets)
        :return: dict with 'loss' (per-token loss tensor) and 'accuracy' (float)
        """
        self.print("[INFO] Running evaluation...", flush=True)

        opt = self.opt
        rank = self.rank
        world_size = self.world_size

        # the data iterator creates an epoch iterator
        data_iterator = generate_data_iterator(data, rank, world_size, seed=self.opt.seed,
                                               num_workers=1, epoch=1, buffer_size=opt.buffer_size)
        epoch_iterator = data_iterator.next_epoch_itr(False, pin_memory=False)

        data_size = len(epoch_iterator)
        i = 0

        self.model.eval()
        self.loss_function.eval()

        total_loss = zero_tensor()
        total_words = zero_tensor()
        total_correct = zero_tensor()

        with torch.no_grad():
            while not data_iterator.end_of_epoch():
                samples = next(epoch_iterator)

                if samples:
                    with autocast():
                        batch = prepare_sample(samples, device=self.device)
                        targets = batch.get('target')
                        # tgt_mask = targets.ne(onmt.constants.PAD)
                        outputs = self.model(batch)

                        loss_dict = self.loss_function(outputs, targets, model=self.model, eval=True)
                        loss_data = loss_dict['data']
                        numel = loss_dict['numel']
                        n_correct = loss_dict['n_correct']

                    # accumulate on-device to avoid per-batch host syncs
                    total_loss.add_(loss_data)
                    total_words.add_(numel)
                    total_correct.add_(n_correct)
                    i = i + 1

        # allreduce the total loss and total words from other processes
        self.all_reduce(total_loss, op=dist.ReduceOp.SUM, group=self.group)
        self.all_reduce(total_words, op=dist.ReduceOp.SUM, group=self.group)
        self.all_reduce(total_correct, op=dist.ReduceOp.SUM, group=self.group)

        self.model.train()
        self.loss_function.train()

        accuracy = total_correct.item() / total_words.item()
        loss = total_loss / total_words

        output = {'loss': loss, 'accuracy': accuracy}

        return output
    def train_epoch(self, epoch, resume=False, itr_progress=None):
        """Train the model for one epoch with gradient accumulation and AMP.

        Gradients are accumulated over `opt.update_frequency` mini-batches;
        DDP gradient synchronization is skipped (no_sync) on the non-updating
        steps and only performed on the step where the optimizer runs.

        :param epoch: 1-based epoch index (used for logging/seeding)
        :param resume: if True, restore iterator position from itr_progress
        :param itr_progress: iterator state dict (currently buggy, see TODO)
        :return: average training loss per target token (tensor)
        """
        global rec_ppl
        opt = self.opt
        train_data = self.train_data
        streaming = opt.streaming
        # Clear the gradients of the model
        self.model.zero_grad()
        # self.model.module.reset_states()
        dataset = train_data
        data_iterator = generate_data_iterator(dataset, self.rank, self.world_size,
                                               seed=self.opt.seed, num_workers=opt.num_workers,
                                               epoch=epoch, buffer_size=opt.buffer_size)
        # TODO: fix resume which is currently buggy
        if resume:
            data_iterator.load_state_dict(itr_progress)
        epoch_iterator = data_iterator.next_epoch_itr(not streaming, pin_memory=opt.pin_memory)
        # Tensor accumulators so they can be all_reduced across ranks at log time.
        total_tokens, total_loss, total_words = zero_tensor(), zero_tensor(), zero_tensor()
        total_correct = zero_tensor()
        report_mpc_loss, report_mpc_numel = zero_tensor(), zero_tensor()
        report_loss, report_tgt_words = zero_tensor(), zero_tensor()
        report_correct = zero_tensor()
        report_src_words = zero_tensor()
        report_rec_loss, report_rev_loss, report_mirror_loss = zero_tensor(), zero_tensor(), zero_tensor()
        start = time.time()
        n_samples = len(data_iterator)
        counter = 0
        num_accumulated_words = zero_tensor()
        num_accumulated_sents = zero_tensor()
        grad_div = 1
        # i counts global steps; each rank advances by world_size per iteration
        i = data_iterator.iterations_in_epoch if not isinstance(train_data, list) else epoch_iterator.n_yielded
        i = i * self.world_size
        numel = 0
        while not data_iterator.end_of_epoch():
            # this batch generator is not very clean atm
            # TODO: move everything to the multiGPU trainer
            samples = next(epoch_iterator)
            batch = prepare_sample(samples, device=self.device)
            # TODO: dealing with oom during distributed training
            oom = zero_tensor()
            # outputs is a dictionary containing keys/values necessary for loss function
            # can be flexibly controlled within models for easier extensibility
            counter = counter + 1
            # reduction_disabled = False if counter >= opt.update_frequency or i == (n_samples - 1) else True
            # NOTE: `reduce` shadows the builtin; True when this step performs the optimizer update
            reduce = True if counter >= opt.update_frequency or i == (n_samples - 1) else False

            def maybe_no_sync():
                # Returns a context manager: DDP.no_sync() on accumulation steps,
                # a no-op ExitStack otherwise (or when not wrapped in DDP).
                if not reduce and isinstance(self.model, DDP_model):
                    return self.model.no_sync()
                else:
                    # when we dont reach the updating step, we do not need to synchronize the gradients
                    # thus disabling the backward grad sync to improve speed
                    return contextlib.ExitStack()  # dummy contextmanager

            with maybe_no_sync():
                with autocast():
                    targets = batch.get('target')
                    # tgt_mask = targets.ne(onmt.constants.PAD)
                    outputs = self.model(batch)
                    batch_size = batch.size
                    # outputs['tgt_mask'] = tgt_mask
                    loss_dict = self.loss_function(outputs, targets, model=self.model)
                    loss_data = loss_dict['data']
                    loss = loss_dict['loss']  # a little trick to avoid gradient overflow with fp16
                    numel = loss_dict['numel']
                    n_correct = loss_dict['n_correct']
                    full_loss = loss
                    # # Todo: MPC loss
                    if self.opt.mpc:
                        mpc_loss_dict = self.mpc_loss(outputs)
                        mpc_loss_data = mpc_loss_dict['data']
                        mpc_loss = mpc_loss_dict['loss']
                        mpc_numel = mpc_loss_dict['numel']
                        # auxiliary MPC objective with a small fixed weight
                        full_loss = full_loss + 0.0001 * mpc_loss
                    else:
                        mpc_loss_data = 0
                        mpc_numel = 0
                # grad scaler has to be done outside of the autocast
                # this line basically equals full_loss.mul_(some_scale).backward()
                # which means the grad scaler doesn't internally change
                self.grad_scaler.scale(full_loss).backward()
                del outputs
            batch_size = batch.size
            src_size = batch.src_size
            tgt_size = numel
            num_accumulated_words.add_(numel)
            num_accumulated_sents.add_(batch_size)
            # We only update the parameters after getting gradients from n mini-batches
            update_flag = False
            if counter >= opt.update_frequency:
                update_flag = True
            elif i == n_samples - 1:  # update for the last minibatch
                update_flag = True
            if update_flag:
                # accumulated gradient case, in this case the update frequency
                # self.all_reduce(num_accumulated_words, op=dist.ReduceOp.SUM, group=self.group)
                grad_denom = 1.0 / grad_div
                if self.opt.normalize_gradient:
                    grad_denom = num_accumulated_words.item() * grad_denom
                else:
                    grad_denom = 1
                # the gradient is scaled by world size, so in order to match the model without multiGPU
                # we rescale the model parameters w.r.t the world size
                grad_denom = grad_denom / self.world_size
                # When we accumulate the gradients, each gradient is already normalized by a constant grad_scaler
                normalize_gradients(self.model.parameters(), grad_denom)
                # Update the parameters.
                if self.opt.max_grad_norm > 0:
                    # unscale before clipping so the norm is measured in true gradient units
                    self.grad_scaler.unscale_(self.optim.optimizer)
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.opt.max_grad_norm)
                self.optim.step(scaler=self.grad_scaler)
                self.grad_scaler.update()
                self.optim.zero_grad()
                self.model.zero_grad()
                counter = 0
                num_accumulated_words.zero_()
                num_accumulated_sents.zero_()
                num_updates = self.optim._step
                # mid-epoch checkpointing every opt.save_every updates
                if opt.save_every > 0 and num_updates % opt.save_every == -1 % opt.save_every:
                    valid_output = self.eval(self.valid_data)
                    valid_ppl = math.exp(min(valid_output['loss'], 100))
                    if self.is_main():
                        print('Validation perplexity: %g' % valid_ppl)
                        print('Validation accuracy: %g' % valid_output['accuracy'])
                        ep = float(epoch) - 1. + ((float(i) + 1.) / n_samples)
                        # NOTE: checkpoint is ranked by (1 - accuracy), not ppl
                        self.save(ep, 1 - valid_output['accuracy'], itr=data_iterator)
            num_words = tgt_size
            report_loss.add_(loss_data)
            report_correct.add_(n_correct)
            report_tgt_words.add_(numel)
            report_src_words.add_(src_size)
            total_loss.add_(loss_data)
            total_words.add_(num_words)
            report_mpc_loss.add_(mpc_loss_data)
            report_mpc_numel.add_(mpc_numel)
            # total_tokens += batch.get('target_output').nelement()
            # total_non_pads += batch.get('target_output').ne(onmt.constants.PAD).sum().item()
            # batch_efficiency = total_non_pads / total_tokens
            # control the index a little bit to ensure the log is always printed
            if i == 0 or ((i + 1) % opt.log_interval < self.world_size):
                # gather report counters from every rank before printing on the main rank
                self.all_reduce(report_loss, op=dist.ReduceOp.SUM, group=self.group)
                self.all_reduce(report_tgt_words, op=dist.ReduceOp.SUM, group=self.group)
                self.all_reduce(report_src_words, op=dist.ReduceOp.SUM, group=self.group)
                self.all_reduce(report_correct, op=dist.ReduceOp.SUM, group=self.group)
                if self.is_main():
                    log_string = ("Epoch %2d, %5d/%5d; ; ppl: %6.2f ; " %
                                  (epoch, i + 1, len(data_iterator),
                                   math.exp(report_loss.item() / report_tgt_words.item())))
                    assert report_correct.item() <= report_tgt_words.item()
                    log_string += ("accuracy: %6.4f; " %
                                   (report_correct.item() / report_tgt_words.item()))
                    if opt.mpc:
                        log_string += ("mpc loss: %6.6f; " %
                                       (report_mpc_loss.item() / report_mpc_numel.item()))
                    log_string += ("lr: %.7f ; updates: %7d; " %
                                   (self.optim.get_learning_rate(),
                                    self.optim._step))
                    log_string += ("%5.0f src tok/s; %5.0f tgt tok/s; " %
                                   (report_src_words.item() / (time.time() - start),
                                    report_tgt_words.item() / (time.time() - start)))
                    log_string += ("%s elapsed" %
                                   str(datetime.timedelta(seconds=int(time.time() - self.start_time))))
                    self.print(log_string, flush=True)
                # reset the report window on every rank (all ranks participated in the all_reduce)
                report_loss.zero_()
                report_tgt_words.zero_()
                report_src_words.zero_()
                report_rec_loss.zero_()
                report_rev_loss.zero_()
                report_mirror_loss.zero_()
                report_correct.zero_()
                report_mpc_loss.zero_()
                report_mpc_numel.zero_()
                start = time.time()
            # increase i by world size
            i = i + self.world_size
        return total_loss / total_words
    # def run(self, save_file=None):
    def run(self, checkpoint=None):
        """Main training loop: optionally resume from a checkpoint, then
        alternate train_epoch / eval for opt.epochs epochs, saving after each.

        :param checkpoint: optional loaded checkpoint dict (model/optim loading
                           per process is still a TODO here; only epoch/progress
                           bookkeeping is derived from it)
        """
        opt = self.opt
        if checkpoint is not None:
            # TODO: have loading checkpoints for each process
            prec_opt = checkpoint['opt'] if 'opt' in checkpoint else None
            if not opt.reset_optim:
                # Only load the progress when we use the same optimizer
                # if 'itr' in checkpoint:
                #     itr_progress = checkpoint['itr']
                # else:
                itr_progress = None
                resume = True
                start_epoch = math.floor(checkpoint['epoch']) if 'epoch' in checkpoint else 1
                if start_epoch is None:
                    start_epoch = 1
            else:
                itr_progress = None
                resume = False
                start_epoch = 1
            # optim_state_dict = checkpoint['optim']
            # # del checkpoint['optim']
            # free the (potentially large) checkpoint memory before training
            del checkpoint
        else:
            itr_progress = None
            resume = False
            start_epoch = 1
        if opt.load_encoder_from:
            self.load_encoder_weight(opt.load_encoder_from)
        #
        if opt.load_decoder_from:
            self.load_decoder_weight(opt.load_decoder_from)
        # initial validation before any training step
        valid_output = self.eval(self.valid_data)
        valid_ppl = math.exp(min(valid_output['loss'], 100))
        if self.is_main():
            print('[INFO] Validation perplexity: %g' % valid_ppl, flush=True)
            print('[INFO] Validation accuracy: %g' % valid_output['accuracy'], flush=True)
        self.start_time = time.time()
        for epoch in range(start_epoch, start_epoch + opt.epochs):
            self.print('')
            # (1) train for one epoch on the training set
            train_loss = self.train_epoch(epoch, resume=resume, itr_progress=itr_progress)
            train_ppl = math.exp(min(train_loss, 100))
            self.print('[INFO] Train perplexity: %g' % train_ppl)
            # (2) evaluate on the validation set
            valid_output = self.eval(self.valid_data)
            valid_ppl = math.exp(min(valid_output['loss'], 100))
            if self.is_main():
                print('[INFO] Validation perplexity: %g' % valid_ppl)
                print('[INFO] Validation accuracy: %g' % valid_output['accuracy'], flush=True)
                # NOTE: checkpoints are ranked by (1 - accuracy) rather than ppl
                self.save(epoch, 1 - valid_output['accuracy'])
            # resume/progress only apply to the first epoch after restart
            itr_progress = None
            resume = False
| 31,146 | 38.576874 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/train_utils/bayes_by_backprop_trainer.py | from __future__ import division
import datetime
import gc
import inspect_model
import math
import os
import re
import time
import torch
from apex import amp
import onmt
import onmt.markdown
import onmt.modules
from onmt.data.data_iterator import DataIterator
from onmt.data.dataset import rewrap
from onmt.model_factory import build_model, build_language_model, optimize_model
from onmt.model_factory import init_model_parameters
from onmt.train_utils.stats import Logger
from onmt.utils import checkpoint_paths, normalize_gradients
from .trainer import BaseTrainer
def varname(p):
    """Best-effort debug helper: return the source-level variable name that was
    passed to this call, by scanning the caller's source line for `varname(<name>)`.

    :param p: any object (only the call-site text is inspected, not the value)
    :return: the identifier string, or None if the call line cannot be matched
    """
    # Fix: the module imports `inspect_model`, not `inspect`, so the original
    # body raised NameError on `inspect.*`. Import the stdlib module locally.
    import inspect
    for line in inspect.getframeinfo(inspect.currentframe().f_back)[3]:
        m = re.search(r'\bvarname\s*\(\s*([A-Za-z_][A-Za-z0-9_]*)\s*\)', line)
        if m:
            return m.group(1)
    return None
class BayesianTrainer(BaseTrainer):
    def __init__(self, model, loss_function, train_data, valid_data, dicts, opt, setup_optimizer=True):
        """Single-GPU trainer for Bayes-by-Backprop models.

        Moves model/loss to the configured GPU, optionally builds the optimizer
        and initializes apex AMP at an opt-level derived from the fp16 flags
        (O0 = fp32, O1 = mixed, O2 = "almost fp16").

        :param setup_optimizer: skip optimizer/AMP setup when False (e.g. eval-only use)
        """
        super().__init__(model, loss_function, train_data, valid_data, dicts, opt)
        if self.cuda:
            torch.cuda.set_device(self.opt.gpus[0])
            if self.opt.seed >= 0:
                torch.manual_seed(self.opt.seed)
            self.loss_function = self.loss_function.cuda()
            self.model = self.model.cuda()
        if setup_optimizer:
            self.optim = onmt.Optim(opt)
            self.optim.set_parameters(self.model.parameters())
            # Map the fp16 flags onto apex opt levels.
            if not self.opt.fp16:
                opt_level = "O0"
                keep_batchnorm_fp32 = False
            elif self.opt.fp16_mixed:
                opt_level = "O1"
                keep_batchnorm_fp32 = None
            else:
                opt_level = "O2"
                keep_batchnorm_fp32 = False
            if self.cuda:
                self.model, self.optim.optimizer = amp.initialize(self.model,
                                                                  self.optim.optimizer,
                                                                  opt_level=opt_level,
                                                                  keep_batchnorm_fp32=keep_batchnorm_fp32,
                                                                  loss_scale="dynamic",
                                                                  verbosity=1 if self.opt.verbose else 0)
        # An ugly hack to switch between align right and align left
        if hasattr(self.model, 'relative'):
            if self.model.relative:
                self.train_data.src_align_right = True
                self.train_data.tgt_align_right = False
                self.valid_data.src_align_right = True
                self.valid_data.tgt_align_right = False
    def warm_up(self):
        """
        Warmup the memory allocator, by attempting to fit the largest batch
        (one forward + backward pass). An OOM here is reported but not fatal.
        :return:
        """
        if self.opt.memory_profiling:
            from pytorch_memlab import MemReporter
            reporter = MemReporter()
        batch = self.train_data.get_largest_batch()
        opt = self.opt
        if self.cuda:
            batch.cuda(fp16=self.opt.fp16 and not self.opt.fp16_mixed)
        self.model.train()
        self.model.zero_grad()
        oom = False
        if self.opt.memory_profiling:
            print("Input size: ")
            print(batch.size, batch.src_size, batch.tgt_size)
        if opt.streaming:
            streaming_state = self.model.init_stream()
        else:
            streaming_state = None
        try:
            targets = batch.get('target_output')
            tgt_mask = targets.data.ne(onmt.constants.PAD)
            outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
                                 zero_encoder=opt.zero_encoder,
                                 mirror=opt.mirror_loss, streaming_state=streaming_state)
            outputs['tgt_mask'] = tgt_mask
            loss_dict = self.loss_function(outputs, targets, model=self.model)
            loss = loss_dict['loss']  # a little trick to avoid gradient overflow with fp16
            # Bayes-by-Backprop ELBO: NLL + KL(q || p) expressed via the two log terms
            log_prior = self.model.log_prior()
            log_variational_posterior = self.model.log_variational_posterior()
            full_loss = loss + (log_variational_posterior - log_prior)
            if opt.mirror_loss:
                rev_loss = loss_dict['rev_loss']
                mirror_loss = loss_dict['mirror_loss']
                full_loss = full_loss + rev_loss + mirror_loss
            # reconstruction loss
            if opt.reconstruct:
                rec_loss = loss_dict['rec_loss']
                rec_loss = rec_loss
                full_loss = full_loss + rec_loss
            optimizer = self.optim.optimizer
            if self.opt.memory_profiling:
                reporter.report(verbose=True)
            # for obj in gc.get_objects():
            #     try:
            #         if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
            #             print(type(obj), obj.type(), obj.size())
            #     except:
            #         pass
            # print("Memory profiling complete.")
            # print(torch.cuda.memory_summary())
            # exit()
            if self.cuda:
                with amp.scale_loss(full_loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                # NOTE: on CPU only the NLL part is backpropagated here
                loss.backward()
            if self.opt.memory_profiling:
                print('========= after backward =========')
                reporter.report(verbose=True)
        except RuntimeError as e:
            # only swallow CUDA OOM; everything else propagates
            if 'out of memory' in str(e):
                oom = True
            else:
                raise e
        if oom:
            print("* Warning: out-of-memory in warming up. This is due to the largest batch is too big for the GPU")
        else:
            print("* Warming up successuflly.")
        if self.opt.memory_profiling:
            if hasattr(torch.cuda, 'memory_summary'):
                print(torch.cuda.memory_summary())
            exit()
def save(self, epoch, valid_ppl, itr=None):
opt = self.opt
model = self.model
dicts = self.dicts
model_state_dict = self.model.state_dict()
optim_state_dict = self.optim.state_dict()
if itr:
itr_state_dict = itr.state_dict()
else:
itr_state_dict = None
# drop a checkpoint
checkpoint = {
'model': model_state_dict,
'dicts': dicts,
'opt': opt,
'epoch': epoch,
'itr': itr_state_dict,
'optim': optim_state_dict,
'additional_batch_order': getattr(self, 'additional_batch_order', None),
'additional_data_iteration': getattr(self, 'additional_data_iteration', None),
'amp': amp.state_dict()
}
file_name = '%s_ppl_%.6f_e%.2f.pt' % (opt.save_model, valid_ppl, epoch)
print('Writing to %s' % file_name)
torch.save(checkpoint, file_name)
# check the save directory here
checkpoint_dir = os.path.dirname(opt.save_model)
existed_save_files = checkpoint_paths(checkpoint_dir)
for save_file in existed_save_files[opt.keep_save_files:]:
print(" * Deleting old save file %s ...." % save_file)
os.remove(save_file)
    def eval(self, data):
        """Evaluate on `data` and return the average loss per target token.

        Runs the model in eval mode without gradients; streaming state is
        threaded through batches when opt.streaming is enabled.
        """
        total_loss = 0
        total_words = 0
        opt = self.opt
        data_iterator = DataIterator(data, data.collater, data.batches, seed=self.opt.seed,
                                     num_workers=opt.num_workers, epoch=1, buffer_size=opt.buffer_size)
        epoch_iterator = data_iterator.next_epoch_itr(False, pin_memory=False)
        self.model.eval()
        self.loss_function.eval()
        self.model.reset_states()
        if opt.streaming:
            streaming_state = self.model.init_stream()
        else:
            streaming_state = None
        """ PyTorch semantics: save space by not creating gradients """
        data_size = len(epoch_iterator)
        i = 0
        with torch.no_grad():
            # for i in range(len()):
            while not data_iterator.end_of_epoch():
                # batch = data.next()[0]
                batch = next(epoch_iterator)
                batch = rewrap(batch)
                if self.cuda:
                    batch.cuda(fp16=self.opt.fp16 and not self.opt.fp16_mixed)
                """ outputs can be either
                        hidden states from decoder or
                        prob distribution from decoder generator
                """
                targets = batch.get('target_output')
                tgt_mask = targets.ne(onmt.constants.PAD)
                outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
                                     mirror=opt.mirror_loss, streaming_state=streaming_state)
                if opt.streaming:
                    # carry the stream memory into the next batch
                    streaming_state = outputs['streaming_state']
                outputs['tgt_mask'] = tgt_mask
                loss_dict = self.loss_function(outputs, targets, model=self.model, eval=True)
                loss_data = loss_dict['data']
                total_loss += loss_data
                total_words += batch.tgt_size
                i = i + 1
        # restore training mode for the caller
        self.model.train()
        self.loss_function.train()
        return total_loss / total_words
def train_epoch(self, epoch, resume=False, itr_progress=None):
global rec_ppl
opt = self.opt
train_data = self.train_data
streaming = opt.streaming
self.model.train()
self.loss_function.train()
# Clear the gradients of the model
# self.runner.zero_grad()
self.model.zero_grad()
self.model.reset_states()
dataset = train_data
data_iterator = DataIterator(dataset, dataset.collater, dataset.batches, seed=self.opt.seed,
num_workers=opt.num_workers, epoch=epoch, buffer_size=opt.buffer_size)
if resume:
data_iterator.load_state_dict(itr_progress)
epoch_iterator = data_iterator.next_epoch_itr(not streaming, pin_memory=opt.pin_memory)
total_tokens, total_loss, total_words = 0, 0, 0
total_non_pads = 0
report_loss, report_tgt_words = 0, 0
report_src_words = 0
report_sents = 0
report_rec_loss, report_rev_loss, report_mirror_loss = 0, 0, 0
report_log_prior = 0
report_log_variational_posterior = 0
start = time.time()
n_samples = len(epoch_iterator)
counter = 0
update_counter = 0
num_accumulated_words = 0
num_accumulated_sents = 0
nan = False
nan_counter = 0
if opt.streaming:
streaming_state = self.model.init_stream()
else:
streaming_state = None
i = data_iterator.iterations_in_epoch
while not data_iterator.end_of_epoch():
curriculum = (epoch < opt.curriculum)
batch = next(epoch_iterator)
batch = rewrap(batch)
grad_scaler = self.opt.batch_size_words if self.opt.update_frequency > 1 else batch.tgt_size
if self.cuda:
batch.cuda(fp16=self.opt.fp16 and not self.opt.fp16_mixed)
oom = False
try:
# outputs is a dictionary containing keys/values necessary for loss function
# can be flexibly controlled within models for easier extensibility
targets = batch.get('target_output')
tgt_mask = targets.data.ne(onmt.constants.PAD)
outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
zero_encoder=opt.zero_encoder,
mirror=opt.mirror_loss, streaming_state=streaming_state)
batch_size = batch.size
outputs['tgt_mask'] = tgt_mask
loss_dict = self.loss_function(outputs, targets, model=self.model)
loss_data = loss_dict['data']
loss = loss_dict['loss'] # a little trick to avoid gradient overflow with fp16
log_prior = self.model.log_prior()
log_variational_posterior = self.model.log_variational_posterior()
# the coeff starts off at 1 for each epoch
# from BBB paper: The first mini batches in each epoch have large KL coeff
# # the later minibatches are influenced by the data
# denom = math.pow(1.5, min(32, update_counter))
# min_coeff = 1 / (self.opt.model_size ** 2)
# kl_coeff = max(1 / denom, min_coeff)
kl_coeff = 1 / (batch.tgt_size * opt.update_frequency)
# kl_coeff = 1 / (self.opt.model_size ** 2)
# kl_coeff = 1
full_loss = loss + kl_coeff * (log_variational_posterior - log_prior)
# print(log_variational_posterior, log_prior)
if opt.mirror_loss:
rev_loss = loss_dict['rev_loss']
rev_loss_data = loss_dict['rev_loss_data']
mirror_loss = loss_dict['mirror_loss']
full_loss = full_loss + rev_loss + mirror_loss
mirror_loss_data = loss_dict['mirror_loss'].item()
else:
rev_loss = None
rev_loss_data = None
mirror_loss_data = 0
# reconstruction loss
if opt.reconstruct:
rec_loss = loss_dict['rec_loss']
rec_loss = rec_loss
full_loss = full_loss + rec_loss
rec_loss_data = loss_dict['rec_loss_data']
else:
rec_loss_data = None
optimizer = self.optim.optimizer
# When the batch size is large, each gradient step is very easy to explode on fp16
# Normalizing the loss to grad scaler ensures this will not happen
full_loss.div_(grad_scaler)
if self.cuda:
with amp.scale_loss(full_loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
full_loss.backward()
except RuntimeError as e:
if 'out of memory' in str(e):
print('| WARNING: ran out of memory on GPU , skipping batch')
oom = True
torch.cuda.empty_cache()
loss = 0
if opt.streaming: # reset stream in this case ...
streaming_state = self.model.init_stream()
else:
raise e
if loss != loss:
# catching NAN problem
oom = True
self.model.zero_grad()
self.optim.zero_grad()
num_accumulated_words = 0
num_accumulated_sents = 0
nan_counter = nan_counter + 1
print("Warning!!! Loss is Nan")
if nan_counter >= 15:
raise ValueError("Training stopped because of multiple NaN occurence. "
"For ASR, using the Relative Transformer is more stable and recommended.")
else:
nan_counter = 0
if not oom:
src_size = batch.src_size
tgt_size = batch.tgt_size
counter = counter + 1
num_accumulated_words += tgt_size
num_accumulated_sents += batch_size
# We only update the parameters after getting gradients from n mini-batches
update_flag = False
if counter >= opt.update_frequency > 0:
update_flag = True
elif 0 < opt.batch_size_update <= num_accumulated_words:
update_flag = True
elif i == n_samples: # update for the last minibatch
update_flag = True
if update_flag:
# accumulated gradient case, in this case the update frequency
if (counter == 1 and self.opt.update_frequency != 1) or counter > 1:
grad_denom = 1 / grad_scaler
if self.opt.normalize_gradient:
grad_denom = num_accumulated_words * grad_denom
else:
grad_denom = 1
# When we accumulate the gradients, each gradient is already normalized by a constant grad_scaler
normalize_gradients(amp.master_params(optimizer), grad_denom)
# Update the parameters.
if self.opt.max_grad_norm > 0:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), self.opt.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
self.model.zero_grad()
counter = 0
num_accumulated_words = 0
num_accumulated_sents = 0
num_updates = self.optim._step
update_counter += 1
if opt.save_every > 0 and num_updates % opt.save_every == -1 % opt.save_every:
valid_loss = self.eval(self.valid_data)
valid_ppl = math.exp(min(valid_loss, 100))
print('Validation perplexity: %g' % valid_ppl)
ep = float(epoch) - 1. + ((float(i) + 1.) / n_samples)
self.save(ep, valid_ppl, itr=data_iterator)
num_words = tgt_size
report_loss += loss_data
report_log_prior += log_prior.item()
report_log_variational_posterior += log_variational_posterior.item()
report_tgt_words += num_words
report_src_words += src_size
report_sents += 1
total_loss += loss_data
total_words += num_words
total_tokens += batch.get('target_output').nelement()
total_non_pads += batch.get('target_output').ne(onmt.constants.PAD).sum().item()
optim = self.optim
batch_efficiency = total_non_pads / total_tokens
if opt.reconstruct:
report_rec_loss += rec_loss_data
if opt.mirror_loss:
report_rev_loss += rev_loss_data
report_mirror_loss += mirror_loss_data
if i == 0 or (i % opt.log_interval == -1 % opt.log_interval):
log_string = ("Epoch %2d, %5d/%5d; ; ppl: %6.2f ; " %
(epoch, i + 1, len(data_iterator),
math.exp(report_loss / report_tgt_words)))
kl_div = report_log_variational_posterior - report_log_prior
log_string += ("KL q||p: %6.2f ; " % (kl_div / report_sents))
if opt.reconstruct:
rec_ppl = math.exp(report_rec_loss / report_src_words.item())
log_string += (" rec_ppl: %6.2f ; " % rec_ppl)
if opt.mirror_loss:
rev_ppl = math.exp(report_rev_loss / report_tgt_words)
log_string += (" rev_ppl: %6.2f ; " % rev_ppl)
# mirror loss per word
log_string += (" mir_loss: %6.2f ; " % (report_mirror_loss / report_tgt_words))
log_string += ("lr: %.7f ; updates: %7d; " %
(optim.getLearningRate(),
optim._step))
log_string += ("%5.0f src/s; %5.0f tgt/s; " %
(report_src_words / (time.time() - start),
report_tgt_words / (time.time() - start)))
log_string += ("%s elapsed" %
str(datetime.timedelta(seconds=int(time.time() - self.start_time))))
print(log_string)
report_loss = 0
report_tgt_words, report_src_words = 0, 0
report_sents = 0
report_rec_loss, report_rev_loss, report_mirror_loss = 0, 0, 0
report_log_prior, report_log_variational_posterior = 0, 0
start = time.time()
i = i + 1
return total_loss / total_words
    # def run(self, save_file=None):
    def run(self, checkpoint=None):
        """Main loop: optionally restore model/optimizer/AMP from `checkpoint`
        (unless opt.reset_optim), warm up the allocator on GPU, then alternate
        train_epoch / eval / save for opt.epochs epochs."""
        opt = self.opt
        model = self.model
        optim = self.optim
        if checkpoint is not None:
            self.model.load_state_dict(checkpoint['model'])
            prec_opt = checkpoint['opt'] if 'opt' in checkpoint else None
            if not opt.reset_optim:
                self.optim.load_state_dict(checkpoint['optim'])
                if prec_opt is not None and hasattr(prec_opt, "fp16_mixed"):
                    # Only load amp information if the mode is the same
                    # Maybe its better to change between optimization mode?
                    if opt.fp16_mixed == prec_opt.fp16_mixed and opt.fp16 == prec_opt.fp16:
                        if 'amp' in checkpoint:
                            amp.load_state_dict(checkpoint['amp'])
                # Only load the progress when we use the same optimizer
                if 'itr' in checkpoint:
                    itr_progress = checkpoint['itr']
                else:
                    itr_progress = None
                # resume from the epoch after the checkpointed (possibly fractional) one
                opt.start_epoch = int(math.floor(float(checkpoint['epoch'] + 1)))
                resume = True
            else:
                itr_progress = None
                resume = False
            # release checkpoint memory before training starts
            del checkpoint['model']
            del checkpoint['optim']
            del checkpoint
        else:
            itr_progress = None
            print('Initializing model parameters')
            init_model_parameters(model, opt)
            resume = False
        if opt.load_encoder_from:
            self.load_encoder_weight(opt.load_encoder_from)
        if opt.load_decoder_from:
            self.load_decoder_weight(opt.load_decoder_from)
        # if we are on a GPU: warm up the memory allocator
        if self.cuda:
            self.warm_up()
        valid_loss = self.eval(self.valid_data)
        valid_ppl = math.exp(min(valid_loss, 100))
        print('Validation perplexity: %g' % valid_ppl)
        self.start_time = time.time()
        for epoch in range(opt.start_epoch, opt.start_epoch + opt.epochs):
            print('')
            # (1) train for one epoch on the training set
            train_loss = self.train_epoch(epoch, resume=resume, itr_progress=itr_progress)
            train_ppl = math.exp(min(train_loss, 100))
            print('Train perplexity: %g' % train_ppl)
            # (2) evaluate on the validation set
            valid_loss = self.eval(self.valid_data)
            valid_ppl = math.exp(min(valid_loss, 100))
            print('Validation perplexity: %g' % valid_ppl)
            self.save(epoch, valid_ppl)
            # resume/progress only apply to the first epoch after restart
            itr_progress = None
            resume = False
| 23,966 | 38.35468 | 117 | py |
NMTGMinor | NMTGMinor-master/onmt/train_utils/accent_gan_trainer.py | from __future__ import division
import datetime
import gc
import inspect_model
import math
import os
import re
import time
import torch
from apex import amp
import onmt
import onmt.markdown
import onmt.modules
from onmt.data.data_iterator import DataIterator
from onmt.data.dataset import rewrap
from onmt.model_factory import build_model, build_language_model, optimize_model
from onmt.model_factory import init_model_parameters
from onmt.train_utils.stats import Logger
from onmt.utils import checkpoint_paths, normalize_gradients
from .trainer import BaseTrainer
def varname(p):
    """Best-effort debug helper: return the source-level variable name that was
    passed to this call, by scanning the caller's source line for `varname(<name>)`.

    :param p: any object (only the call-site text is inspected, not the value)
    :return: the identifier string, or None if the call line cannot be matched
    """
    # Fix: the module imports `inspect_model`, not `inspect`, so the original
    # body raised NameError on `inspect.*`. Import the stdlib module locally.
    import inspect
    for line in inspect.getframeinfo(inspect.currentframe().f_back)[3]:
        m = re.search(r'\bvarname\s*\(\s*([A-Za-z_][A-Za-z0-9_]*)\s*\)', line)
        if m:
            return m.group(1)
    return None
def generate_data_iterator(dataset, seed, num_workers=1, epoch=1., buffer_size=0):
    """Build a data iterator for `dataset`.

    A list of datasets is wrapped in a MultiDataIterator, a single dataset in
    a plain DataIterator.

    NOTE(review): MultiDataIterator is not imported in this file's visible
    imports — the list branch would raise NameError; confirm the import.
    """
    # check if dataset is a list:
    if isinstance(dataset, list):
        # this is a multidataset
        data_iterator = MultiDataIterator(dataset, seed=seed, num_workers=num_workers,
                                          epoch=epoch, buffer_size=buffer_size)
    else:
        data_iterator = DataIterator(dataset, dataset.collater, dataset.batches, seed=seed,
                                     num_workers=num_workers, epoch=epoch, buffer_size=buffer_size)
    return data_iterator
class SpeechAETrainer(BaseTrainer):
    def __init__(self, model, loss_function, train_data, valid_data, dicts, opt, setup_optimizer=True):
        """Single-GPU trainer for the speech autoencoder model.

        Moves model/loss to the configured GPU, optionally builds the optimizer
        and initializes apex AMP at an opt-level derived from the fp16 flags
        (O0 = fp32, O1 = mixed, O2 = "almost fp16").

        :param setup_optimizer: skip optimizer/AMP setup when False
        """
        super().__init__(model, loss_function, train_data, valid_data, dicts, opt)
        self.n_gpus = len(self.opt.gpus)
        if self.cuda:
            torch.cuda.set_device(self.opt.gpus[0])
            if self.opt.seed >= 0:
                torch.manual_seed(self.opt.seed)
            self.loss_function = self.loss_function.cuda()
            self.model = self.model.cuda()
        if setup_optimizer:
            self.optim = onmt.Optim(opt)
            self.optim.set_parameters(self.model.parameters())
            # Map the fp16 flags onto apex opt levels.
            if not self.opt.fp16:
                opt_level = "O0"
                keep_batchnorm_fp32 = False
            elif self.opt.fp16_mixed:
                opt_level = "O1"
                keep_batchnorm_fp32 = None
            else:
                opt_level = "O2"
                keep_batchnorm_fp32 = False
            if self.cuda:
                self.model, self.optim.optimizer = amp.initialize(self.model,
                                                                  self.optim.optimizer,
                                                                  opt_level=opt_level,
                                                                  keep_batchnorm_fp32=keep_batchnorm_fp32,
                                                                  loss_scale="dynamic",
                                                                  verbosity=1 if self.opt.verbose else 0)
    def warm_up(self):
        """
        Warmup the memory allocator, by attempting to fit the largest batch
        (one forward + backward pass of the Tacotron-style model). An OOM here
        is reported but not fatal.
        :return:
        """
        print("Tacotron_warmup")
        if self.opt.memory_profiling:
            from pytorch_memlab import MemReporter
            reporter = MemReporter()
        batch = self.train_data[0].get_largest_batch() if isinstance(self.train_data, list) \
            else self.train_data.get_largest_batch()
        opt = self.opt
        if self.cuda:
            batch.cuda(fp16=self.opt.fp16 and not self.opt.fp16_mixed)
        self.model.train()
        self.model.zero_grad()
        oom = False
        if self.opt.memory_profiling:
            print("Input size: ")
            print(batch.size, batch.src_size, batch.tgt_size)
        if opt.streaming:
            streaming_state = self.model.init_stream()
        else:
            streaming_state = None
        try:
            targets = batch.get('target_output')
            tgt_mask = None
            outputs = self.model(batch)
            gate_padded = batch.get('gate_padded')
            if self.opt.n_frames_per_step > 1:
                # keep one gate value per reduction group (every n-th frame)
                # NOTE: `slice` shadows the builtin name within this scope
                slice = torch.arange(self.opt.n_frames_per_step - 1, gate_padded.size(1), self.opt.n_frames_per_step)
                gate_padded = gate_padded[:, slice]
            src_org = batch.get('source_org')
            # drop the first feature channel; remaining channels are the target
            src_org = src_org.narrow(2, 1, src_org.size(2) - 1)
            target = [src_org.permute(1, 2, 0).contiguous(), gate_padded]
            loss = self.loss_function(outputs, target)
            # loss_dict = self.loss_function(outputs, targets, model=self.model)
            loss = loss  # a little trick to avoid gradient overflow with fp16
            full_loss = loss
            optimizer = self.optim.optimizer
            if self.opt.memory_profiling:
                reporter.report(verbose=True)
            if self.cuda:
                with amp.scale_loss(full_loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.div_(batch.tgt_size).backward()
            if self.opt.memory_profiling:
                print('========= after backward =========')
                reporter.report(verbose=True)
            # discard warm-up gradients
            self.model.zero_grad()
            self.optim.zero_grad()
        except RuntimeError as e:
            # only swallow CUDA OOM; everything else propagates
            if 'out of memory' in str(e):
                oom = True
            else:
                raise e
        if oom:
            print("* Warning: out-of-memory in warming up. This is due to the largest batch is too big for the GPU.")
        else:
            print("* Warming up successuflly.")
        if self.opt.memory_profiling:
            if hasattr(torch.cuda, 'memory_summary'):
                print(torch.cuda.memory_summary())
            exit()
def save(self, epoch, valid_ppl, itr=None):
opt = self.opt
model = self.model
dicts = self.dicts
model_state_dict = self.model.state_dict()
optim_state_dict = self.optim.state_dict()
if itr:
itr_state_dict = itr.state_dict()
else:
itr_state_dict = None
# drop a checkpoint
checkpoint = {
'model': model_state_dict,
'dicts': dicts,
'opt': opt,
'epoch': epoch,
'itr': itr_state_dict,
'optim': optim_state_dict,
'amp': amp.state_dict()
}
file_name = '%s_ppl_%.6f_e%.2f.pt' % (opt.save_model, valid_ppl, epoch)
print('Writing to %s' % file_name)
torch.save(checkpoint, file_name)
# check the save directory here
checkpoint_dir = os.path.dirname(opt.save_model)
existed_save_files = checkpoint_paths(checkpoint_dir)
for save_file in existed_save_files[opt.keep_save_files:]:
print(" * Deleting old save file %s ...." % save_file)
os.remove(save_file)
    def run(self, checkpoint=None):
        """Main loop: optionally restore model/optimizer/AMP from `checkpoint`
        (unless opt.reset_optim), warm up the allocator on GPU, then alternate
        train_epoch / eval / save for opt.epochs epochs."""
        opt = self.opt
        model = self.model
        optim = self.optim
        if checkpoint is not None:
            self.model.load_state_dict(checkpoint['model'])
            prec_opt = checkpoint['opt'] if 'opt' in checkpoint else None
            if not opt.reset_optim:
                print("* Loading optimizer states ... ")
                self.optim.load_state_dict(checkpoint['optim'])
                if prec_opt is not None and hasattr(prec_opt, "fp16_mixed"):
                    # Only load amp information if the mode is the same
                    # Maybe its better to change between optimization mode?
                    if opt.fp16_mixed == prec_opt.fp16_mixed and opt.fp16 == prec_opt.fp16:
                        if 'amp' in checkpoint:
                            amp.load_state_dict(checkpoint['amp'])
                # Only load the progress when we use the same optimizer
                if 'itr' in checkpoint:
                    itr_progress = checkpoint['itr']
                else:
                    itr_progress = None
                resume = True
                start_epoch = checkpoint['epoch'] if 'epoch' in checkpoint else 1
                if start_epoch is None:
                    start_epoch = 1
            else:
                itr_progress = None
                resume = False
                start_epoch = 1
            # release checkpoint memory before training starts
            del checkpoint['model']
            del checkpoint['optim']
            del checkpoint
        else:
            itr_progress = None
            print('Initializing model parameters')
            init_model_parameters(model, opt)
            resume = False
            start_epoch = 1
        if opt.load_encoder_from:
            self.load_encoder_weight(opt.load_encoder_from)
        if opt.load_decoder_from:
            self.load_decoder_weight(opt.load_decoder_from)
        # if we are on a GPU: warm up the memory allocator
        if self.cuda:
            self.warm_up()
        valid_loss = self.eval(self.valid_data)
        print('Validation loss: %g' % valid_loss)
        self.start_time = time.time()
        for epoch in range(start_epoch, start_epoch + opt.epochs):
            print('')
            # (1) train for one epoch on the training set
            train_loss = self.train_epoch(epoch, resume=resume, itr_progress=itr_progress)
            print('Train loss: %g' % train_loss)
            # (2) evaluate on the validation set
            valid_loss = self.eval(self.valid_data)
            print('Validation loss: %g' % valid_loss)
            self.save(epoch, valid_loss)
            # resume/progress only apply to the first epoch after restart
            itr_progress = None
            resume = False
def eval(self, data):
    """Evaluate the speech model on `data` without gradients.

    :param data: validation dataset (wrapped by generate_data_iterator).
    :return: accumulated loss divided by the number of batches, scaled by 100.
    """
    total_loss = 0
    total_tgt_frames = 0
    total_sent = 0
    opt = self.opt
    self.model.eval()
    self.loss_function.eval()
    # self.model.reset_states()
    # the data iterator creates an epoch iterator
    data_iterator = generate_data_iterator(data, seed=self.opt.seed,
                                           num_workers=opt.num_workers, epoch=1,
                                           buffer_size=opt.buffer_size)
    epoch_iterator = data_iterator.next_epoch_itr(False, pin_memory=False)
    if opt.streaming:
        streaming_state = self.model.init_stream()
    else:
        streaming_state = None
    data_size = len(epoch_iterator)
    i = 0
    # PyTorch semantics: save space by not creating gradients
    with torch.no_grad():
        while not data_iterator.end_of_epoch():
            batch = next(epoch_iterator)
            if isinstance(batch, list):
                batch = batch[0]
            batch = rewrap(batch)
            if self.cuda:
                batch.cuda(fp16=self.opt.fp16 and not self.opt.fp16_mixed)
            # outputs can be either hidden states from the decoder or a
            # prob distribution from the decoder generator
            outputs = self.model(batch)
            gate_padded = batch.get('gate_padded')
            if self.opt.n_frames_per_step > 1:
                # keep one gate target per frame group; renamed from `slice`
                # to avoid shadowing the Python builtin
                gate_idx = torch.arange(self.opt.n_frames_per_step - 1, gate_padded.size(1),
                                        self.opt.n_frames_per_step)
                gate_padded = gate_padded[:, gate_idx]
            src_org = batch.get('source_org')
            src_org = src_org.narrow(2, 1, src_org.size(2) - 1)
            target = [src_org.permute(1, 2, 0).contiguous(), gate_padded]
            loss = self.loss_function(outputs, target)
            loss_data = loss.data.item()
            total_loss += loss_data
            total_tgt_frames += batch.src_size
            total_sent += batch.size
            i = i + 1
    # restore training mode
    self.model.train()
    self.loss_function.train()
    return total_loss / data_size * 100
def train_epoch(self, epoch, resume=False, itr_progress=None):
    """Train the speech model for one epoch with gradient accumulation.

    :param epoch: 1-based epoch index (also drives the data iterator seed).
    :param resume: if True, restore iterator progress from `itr_progress`.
    :param itr_progress: iterator state_dict to resume from (or None).
    :return: accumulated loss divided by the number of batches, scaled by 100.
    """
    global rec_ppl
    opt = self.opt
    train_data = self.train_data
    streaming = opt.streaming
    self.model.train()
    self.loss_function.train()
    # Clear the gradients of the model
    # self.runner.zero_grad()
    self.model.zero_grad()
    dataset = train_data
    data_iterator = generate_data_iterator(dataset, seed=self.opt.seed, num_workers=opt.num_workers,
                                           epoch=epoch, buffer_size=opt.buffer_size)
    if resume:
        data_iterator.load_state_dict(itr_progress)
    epoch_iterator = data_iterator.next_epoch_itr(not streaming, pin_memory=opt.pin_memory)
    total_loss, total_frames = 0, 0
    report_loss, report_tgt_frames, report_sent = 0, 0, 0
    start = time.time()
    n_samples = len(epoch_iterator)
    counter = 0  # mini-batches accumulated since the last optimizer step
    num_accumulated_sents = 0
    grad_scaler = -1  # -1 marks "not yet set for the current accumulation window"
    nan = False
    nan_counter = 0
    i = data_iterator.iterations_in_epoch if not isinstance(train_data, list) else epoch_iterator.n_yielded
    while not data_iterator.end_of_epoch():
        curriculum = (epoch < opt.curriculum)
        # this batch generator is not very clean atm
        batch = next(epoch_iterator)
        if isinstance(batch, list) and self.n_gpus == 1:
            batch = batch[0]
        batch = rewrap(batch)
        if grad_scaler == -1:
            grad_scaler = 1  # if self.opt.update_frequency > 1 else batch.tgt_size
        if self.cuda:
            batch.cuda(fp16=self.opt.fp16 and not self.opt.fp16_mixed)
        oom = False
        try:
            # outputs is a dictionary containing keys/values necessary for loss function
            # can be flexibly controlled within models for easier extensibility
            # targets = batch.get('target_output')
            # tgt_mask = targets.ne(onmt.constants.PAD)
            outputs = self.model(batch)
            gate_padded = batch.get('gate_padded')
            if self.opt.n_frames_per_step > 1:
                # NOTE(review): `slice` shadows the Python builtin here
                slice = torch.arange(0, gate_padded.size(1), self.opt.n_frames_per_step)
                gate_padded = gate_padded[:, slice]
            src_org = batch.get('source_org')
            src_org = src_org.narrow(2, 1, src_org.size(2) - 1)
            target = [src_org.permute(1, 2, 0).contiguous(), gate_padded]
            loss = self.loss_function(outputs, target)
            batch_size = batch.size
            loss_data = loss.data.item()
            # a little trick to avoid gradient overflow with fp16
            full_loss = loss
            optimizer = self.optim.optimizer
            # When the batch size is large, each gradient step is very easy to explode on fp16
            # Normalizing the loss to grad scaler ensures this will not happen
            full_loss.div_(grad_scaler)
            if self.cuda:
                with amp.scale_loss(full_loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                full_loss.backward()
            del outputs
        except RuntimeError as e:
            if 'out of memory' in str(e):
                print('| WARNING: ran out of memory on GPU , skipping batch')
                oom = True
                torch.cuda.empty_cache()
                loss = 0
                if opt.streaming:  # reset stream in this case ...
                    streaming_state = self.model.init_stream()
            else:
                raise e
        if loss != loss:
            # catching NAN problem (NaN != NaN)
            oom = True
            self.model.zero_grad()
            self.optim.zero_grad()
            nan_counter = nan_counter + 1
            print("Warning!!! Loss is Nan")
            if nan_counter >= 15:
                raise ValueError("Training stopped because of multiple NaN occurence. "
                                 "For ASR, using the Relative Transformer is more stable and recommended.")
        else:
            nan_counter = 0
        if not oom:
            src_size = batch.src_size
            counter = counter + 1
            # We only update the parameters after getting gradients from n mini-batches
            update_flag = False
            if counter >= opt.update_frequency > 0:
                update_flag = True
            elif i == n_samples:  # update for the last minibatch
                update_flag = True
            if update_flag:
                # accumulated gradient case, in this case the update frequency
                if (counter == 1 and self.opt.update_frequency != 1) or counter > 1:
                    grad_denom = 1 / grad_scaler
                    # if self.opt.normalize_gradient:
                    #     grad_denom = num_accumulated_words * grad_denom
                else:
                    grad_denom = 1.0
                # When we accumulate the gradients, each gradient is already normalized by a constant grad_scaler
                normalize_gradients(amp.master_params(optimizer), grad_denom)
                # Update the parameters.
                if self.opt.max_grad_norm > 0:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), self.opt.max_grad_norm)
                self.optim.step()
                self.optim.zero_grad()
                self.model.zero_grad()
                counter = 0
                # num_accumulated_words = 0
                grad_scaler = -1
                num_updates = self.optim._step
                # periodic mid-epoch checkpointing
                if opt.save_every > 0 and num_updates % opt.save_every == -1 % opt.save_every:
                    valid_loss = self.eval(self.valid_data)
                    valid_ppl = math.exp(min(valid_loss, 100))
                    print('Validation perplexity: %g' % valid_ppl)
                    ep = float(epoch) - 1. + ((float(i) + 1.) / n_samples)
                    self.save(ep, valid_ppl, itr=data_iterator)
            report_loss += loss_data
            # report_tgt_words += num_words
            num_accumulated_sents += batch_size
            report_sent += batch_size
            total_frames += src_size
            report_tgt_frames += src_size
            total_loss += loss_data
            optim = self.optim
            # batch_efficiency = total_non_pads / total_tokens
            if i == 0 or (i % opt.log_interval == -1 % opt.log_interval):
                log_string = ("Epoch %2d, %5d/%5d; ; loss : %6.2f ; " %
                              (epoch, i + 1, len(data_iterator),
                               report_loss))
                log_string += ("lr: %.7f ; updates: %7d; " %
                               (optim.getLearningRate(),
                                optim._step))
                #
                log_string += ("%5.0f src tok/s " %
                               (report_tgt_frames / (time.time() - start)))
                log_string += ("%s elapsed" %
                               str(datetime.timedelta(seconds=int(time.time() - self.start_time))))
                print(log_string)
                report_loss = 0
                report_tgt_frames = 0
                report_sent = 0
                start = time.time()
        i = i + 1
    return total_loss / n_samples * 100
class XETrainer(BaseTrainer):
def __init__(self, model, loss_function, train_data, valid_data, dicts, opt, setup_optimizer=True):
    """Cross-entropy trainer.

    :param model: NMT model to train.
    :param loss_function: main loss; an auxiliary LID loss is attached when
        opt.lfv_multilingual or opt.lid_loss is set.
    :param train_data: training dataset.
    :param valid_data: validation dataset.
    :param dicts: vocabularies.
    :param opt: option namespace.
    :param setup_optimizer: if True, build the optimizer and initialize AMP.
    """
    super().__init__(model, loss_function, train_data, valid_data, dicts, opt)
    if opt.lfv_multilingual or opt.lid_loss:
        from onmt.models.speech_recognizer.lid_loss import CrossEntropyLIDLoss
        lid_loss = CrossEntropyLIDLoss(opt.n_languages, opt.label_smoothing, opt.fast_xentropy)
        self.loss_function.add_loss_function(lid_loss, 'lid_loss')
    self.n_gpus = len(self.opt.gpus)
    if self.cuda:
        torch.cuda.set_device(self.opt.gpus[0])
        if self.opt.seed >= 0:
            torch.manual_seed(self.opt.seed)
        self.loss_function = self.loss_function.cuda()
        self.model = self.model.cuda()
    if setup_optimizer:
        self.optim = onmt.Optim(opt)
        self.optim.set_parameters(self.model.parameters())
        # map the fp16 flags to apex AMP opt levels
        if not self.opt.fp16:
            opt_level = "O0"
            keep_batchnorm_fp32 = False
        elif self.opt.fp16_mixed:
            opt_level = "O1"
            keep_batchnorm_fp32 = None
        else:
            opt_level = "O2"
            keep_batchnorm_fp32 = False
        if self.cuda:
            self.model, self.optim.optimizer = amp.initialize(self.model,
                                                              self.optim.optimizer,
                                                              opt_level=opt_level,
                                                              keep_batchnorm_fp32=keep_batchnorm_fp32,
                                                              loss_scale="dynamic",
                                                              verbosity=1 if self.opt.verbose else 0)
    # An ugly hack to switch between align right and align left
    if hasattr(self.model, 'relative'):
        if self.model.relative:
            self.train_data.src_align_right = True
            self.train_data.tgt_align_right = False
            self.valid_data.src_align_right = True
            # fix: the original assigned tgt_align_right = False twice
            self.valid_data.tgt_align_right = False
def save(self, epoch, valid_ppl, itr=None):
    """Write a checkpoint for the current state and prune old save files.

    :param epoch: (possibly fractional) epoch number, encoded in the file name.
    :param valid_ppl: validation perplexity, encoded in the file name.
    :param itr: optional data iterator whose progress is stored for resuming.
    """
    opt = self.opt
    model = self.model
    dicts = self.dicts
    itr_state_dict = itr.state_dict() if itr else None
    # drop a checkpoint
    checkpoint = {
        'model': self.model.state_dict(),
        'dicts': dicts,
        'opt': opt,
        'epoch': epoch,
        'itr': itr_state_dict,
        'optim': self.optim.state_dict(),
        'amp': amp.state_dict()
    }
    file_name = '%s_ppl_%.6f_e%.2f.pt' % (opt.save_model, valid_ppl, epoch)
    print('Writing to %s' % file_name)
    torch.save(checkpoint, file_name)
    # check the save directory here: keep only the newest opt.keep_save_files
    checkpoint_dir = os.path.dirname(opt.save_model)
    for save_file in checkpoint_paths(checkpoint_dir)[opt.keep_save_files:]:
        print(" * Deleting old save file %s ...." % save_file)
        os.remove(save_file)
def eval(self, data):
    """Evaluate the model on `data` and return the loss per target word.

    :param data: validation dataset (wrapped by generate_data_iterator).
    :return: total cross-entropy divided by the number of target tokens.
    """
    total_loss = 0
    total_words = 0
    opt = self.opt
    self.model.eval()
    self.loss_function.eval()
    self.model.reset_states()
    # the data iterator creates an epoch iterator
    data_iterator = generate_data_iterator(data, seed=self.opt.seed,
                                           num_workers=opt.num_workers, epoch=1, buffer_size=opt.buffer_size)
    epoch_iterator = data_iterator.next_epoch_itr(False, pin_memory=False)
    if opt.streaming:
        streaming_state = self.model.init_stream()
    else:
        streaming_state = None
    """ PyTorch semantics: save space by not creating gradients """
    data_size = len(epoch_iterator)
    i = 0
    with torch.no_grad():
        # for i in range(len()):
        while not data_iterator.end_of_epoch():
            # batch = data.next()[0]
            batch = next(epoch_iterator)
            if isinstance(batch, list):
                batch = batch[0]
            batch = rewrap(batch)
            if self.cuda:
                batch.cuda(fp16=self.opt.fp16 and not self.opt.fp16_mixed)
            """ outputs can be either
                    hidden states from decoder or
                    prob distribution from decoder generator
            """
            targets = batch.get('target_output')
            tgt_mask = targets.ne(onmt.constants.PAD)
            outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
                                 mirror=opt.mirror_loss, streaming_state=streaming_state, nce=opt.nce)
            if opt.streaming:
                # carry the stream memory across batches
                streaming_state = outputs['streaming_state']
            outputs['tgt_mask'] = tgt_mask
            loss_dict = self.loss_function(outputs, targets, model=self.model, eval=True)
            loss_data = loss_dict['data']
            total_loss += loss_data
            total_words += batch.tgt_size
            i = i + 1
    # restore training mode before returning
    self.model.train()
    self.loss_function.train()
    return total_loss / total_words
def train_epoch(self, epoch, resume=False, itr_progress=None):
    """Train for one epoch with gradient accumulation, AMP and OOM/NaN recovery.

    :param epoch: 1-based epoch index (also drives the data iterator seed).
    :param resume: if True, restore iterator progress from `itr_progress`.
    :param itr_progress: iterator state_dict to resume from (or None).
    :return: total loss divided by the number of target words (per-word NLL).
    """
    global rec_ppl
    opt = self.opt
    train_data = self.train_data
    streaming = opt.streaming
    self.model.train()
    self.loss_function.train()
    # Clear the gradients of the model
    # self.runner.zero_grad()
    self.model.zero_grad()
    self.model.reset_states()
    dataset = train_data
    data_iterator = generate_data_iterator(dataset, seed=self.opt.seed, num_workers=opt.num_workers,
                                           epoch=epoch, buffer_size=opt.buffer_size)
    if resume:
        data_iterator.load_state_dict(itr_progress)
    epoch_iterator = data_iterator.next_epoch_itr(not streaming, pin_memory=opt.pin_memory)
    total_tokens, total_loss, total_words = 0, 0, 0
    total_non_pads = 0
    report_loss, report_tgt_words = 0, 0
    report_src_words = 0
    report_rec_loss, report_rev_loss, report_mirror_loss = 0, 0, 0
    start = time.time()
    n_samples = len(epoch_iterator)
    counter = 0  # mini-batches accumulated since the last optimizer step
    num_accumulated_words = 0
    num_accumulated_sents = 0
    grad_scaler = -1  # -1 marks "not yet set for the current accumulation window"
    nan = False
    nan_counter = 0
    if opt.streaming:
        streaming_state = self.model.init_stream()
    else:
        streaming_state = None
    i = data_iterator.iterations_in_epoch if not isinstance(train_data, list) else epoch_iterator.n_yielded
    while not data_iterator.end_of_epoch():
        curriculum = (epoch < opt.curriculum)
        # this batch generator is not very clean atm
        batch = next(epoch_iterator)
        if isinstance(batch, list) and self.n_gpus == 1:
            batch = batch[0]
        batch = rewrap(batch)
        if grad_scaler == -1:
            grad_scaler = 1  # if self.opt.update_frequency > 1 else batch.tgt_size
        if self.cuda:
            batch.cuda(fp16=self.opt.fp16 and not self.opt.fp16_mixed)
        # if opt.streaming:
        #     if train_data.is_new_stream():
        #         streaming_state = self.model.init_stream()
        # else:
        #     streaming_state = None
        oom = False
        try:
            # outputs is a dictionary containing keys/values necessary for loss function
            # can be flexibly controlled within models for easier extensibility
            targets = batch.get('target_output')
            tgt_mask = targets.ne(onmt.constants.PAD)
            outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
                                 zero_encoder=opt.zero_encoder,
                                 mirror=opt.mirror_loss, streaming_state=streaming_state,
                                 nce=opt.nce)
            # print("time " + str(time.time() - start_time_t))
            batch_size = batch.size
            outputs['tgt_mask'] = tgt_mask
            loss_dict = self.loss_function(outputs, targets, model=self.model)
            loss_data = loss_dict['data']
            loss = loss_dict['loss']  # a little trick to avoid gradient overflow with fp16
            full_loss = loss
            if opt.mirror_loss:
                rev_loss = loss_dict['rev_loss']
                rev_loss_data = loss_dict['rev_loss_data']
                mirror_loss = loss_dict['mirror_loss']
                full_loss = full_loss + rev_loss + mirror_loss
                mirror_loss_data = loss_dict['mirror_loss'].item()
            else:
                rev_loss_data = None
                mirror_loss_data = 0
            # reconstruction loss
            if opt.reconstruct:
                rec_loss = loss_dict['rec_loss']
                rec_loss = rec_loss
                full_loss = full_loss + rec_loss
                rec_loss_data = loss_dict['rec_loss_data']
            else:
                rec_loss_data = None
            if opt.lfv_multilingual or opt.lid_loss:
                # auxiliary language-identification loss
                lid_logits = outputs['lid_logits']
                lid_labels = batch.get('target_lang')
                lid_loss_function = self.loss_function.get_loss_function('lid_loss')
                lid_loss = lid_loss_function([lid_logits.unsqueeze(0)], lid_labels)
                full_loss = full_loss + lid_loss
            optimizer = self.optim.optimizer
            # When the batch size is large, each gradient step is very easy to explode on fp16
            # Normalizing the loss to grad scaler ensures this will not happen
            full_loss.div_(grad_scaler)
            if self.cuda:
                with amp.scale_loss(full_loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                full_loss.backward()
            del outputs
        except RuntimeError as e:
            if 'out of memory' in str(e):
                print('| WARNING: ran out of memory on GPU , skipping batch')
                oom = True
                torch.cuda.empty_cache()
                loss = 0
                if opt.streaming:  # reset stream in this case ...
                    streaming_state = self.model.init_stream()
            else:
                raise e
        if loss != loss:
            # catching NAN problem (NaN != NaN)
            oom = True
            self.model.zero_grad()
            self.optim.zero_grad()
            num_accumulated_words = 0
            num_accumulated_sents = 0
            nan_counter = nan_counter + 1
            print("Warning!!! Loss is Nan")
            if nan_counter >= 15:
                raise ValueError("Training stopped because of multiple NaN occurence. "
                                 "For ASR, using the Relative Transformer is more stable and recommended.")
        else:
            nan_counter = 0
        if not oom:
            src_size = batch.src_size
            tgt_size = batch.tgt_size
            counter = counter + 1
            num_accumulated_words += tgt_size
            num_accumulated_sents += batch_size
            # We only update the parameters after getting gradients from n mini-batches
            update_flag = False
            if counter >= opt.update_frequency > 0:
                update_flag = True
            elif 0 < opt.batch_size_update <= num_accumulated_words:
                update_flag = True
            elif i == n_samples:  # update for the last minibatch
                update_flag = True
            if update_flag:
                # accumulated gradient case, in this case the update frequency
                if (counter == 1 and self.opt.update_frequency != 1) or counter > 1:
                    grad_denom = 1 / grad_scaler
                    if self.opt.normalize_gradient:
                        grad_denom = num_accumulated_words * grad_denom
                else:
                    grad_denom = 1
                # When we accumulate the gradients, each gradient is already normalized by a constant grad_scaler
                normalize_gradients(amp.master_params(optimizer), grad_denom)
                # Update the parameters.
                if self.opt.max_grad_norm > 0:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), self.opt.max_grad_norm)
                self.optim.step()
                self.optim.zero_grad()
                self.model.zero_grad()
                counter = 0
                num_accumulated_words = 0
                num_accumulated_sents = 0
                grad_scaler = -1
                num_updates = self.optim._step
                # periodic mid-epoch checkpointing
                if opt.save_every > 0 and num_updates % opt.save_every == -1 % opt.save_every:
                    valid_loss = self.eval(self.valid_data)
                    valid_ppl = math.exp(min(valid_loss, 100))
                    print('Validation perplexity: %g' % valid_ppl)
                    ep = float(epoch) - 1. + ((float(i) + 1.) / n_samples)
                    self.save(ep, valid_ppl, itr=data_iterator)
            num_words = tgt_size
            report_loss += loss_data
            report_tgt_words += num_words
            report_src_words += src_size
            total_loss += loss_data
            total_words += num_words
            total_tokens += batch.get('target_output').nelement()
            total_non_pads += batch.get('target_output').ne(onmt.constants.PAD).sum().item()
            optim = self.optim
            batch_efficiency = total_non_pads / total_tokens
            if opt.reconstruct:
                report_rec_loss += rec_loss_data
            if opt.mirror_loss:
                report_rev_loss += rev_loss_data
                report_mirror_loss += mirror_loss_data
            if i == 0 or (i % opt.log_interval == -1 % opt.log_interval):
                log_string = ("Epoch %2d, %5d/%5d; ; ppl: %6.2f ; " %
                              (epoch, i + 1, len(data_iterator),
                               math.exp(report_loss / report_tgt_words)))
                if opt.reconstruct:
                    rec_ppl = math.exp(report_rec_loss / report_src_words.item())
                    log_string += (" rec_ppl: %6.2f ; " % rec_ppl)
                if opt.mirror_loss:
                    rev_ppl = math.exp(report_rev_loss / report_tgt_words)
                    log_string += (" rev_ppl: %6.2f ; " % rev_ppl)
                    # mirror loss per word
                    log_string += (" mir_loss: %6.2f ; " % (report_mirror_loss / report_tgt_words))
                log_string += ("lr: %.7f ; updates: %7d; " %
                               (optim.getLearningRate(),
                                optim._step))
                log_string += ("%5.0f src tok/s; %5.0f tgt tok/s; " %
                               (report_src_words / (time.time() - start),
                                report_tgt_words / (time.time() - start)))
                log_string += ("%s elapsed" %
                               str(datetime.timedelta(seconds=int(time.time() - self.start_time))))
                print(log_string)
                report_loss = 0
                report_tgt_words, report_src_words = 0, 0
                report_rec_loss, report_rev_loss, report_mirror_loss = 0, 0, 0
                start = time.time()
        i = i + 1
    return total_loss / total_words
# def run(self, save_file=None):
def run(self, checkpoint=None):
    """Main training entry point: optionally restore a checkpoint, then
    alternate one training epoch and one validation pass, saving a
    checkpoint (named by validation perplexity) after every epoch.

    :param checkpoint: dict produced by torch.save (keys 'model', 'optim',
        optionally 'opt', 'amp', 'itr', 'epoch'), or None to train from scratch.
    """
    opt = self.opt
    model = self.model
    optim = self.optim
    if checkpoint is not None:
        self.model.load_state_dict(checkpoint['model'])
        # options the checkpoint was trained with (may be absent in old files)
        prec_opt = checkpoint['opt'] if 'opt' in checkpoint else None
        if not opt.reset_optim:
            print("* Loading optimizer states ... ")
            self.optim.load_state_dict(checkpoint['optim'])
            if prec_opt is not None and hasattr(prec_opt, "fp16_mixed"):
                # Only load amp information if the mode is the same
                # Maybe its better to change between optimization mode?
                if opt.fp16_mixed == prec_opt.fp16_mixed and opt.fp16 == prec_opt.fp16:
                    if 'amp' in checkpoint:
                        amp.load_state_dict(checkpoint['amp'])
            # Only load the progress when we use the same optimizer
            if 'itr' in checkpoint:
                itr_progress = checkpoint['itr']
            else:
                itr_progress = None
            resume = True
            start_epoch = checkpoint['epoch'] if 'epoch' in checkpoint else 1
            if start_epoch is None:
                start_epoch = 1
        else:
            itr_progress = None
            resume = False
            start_epoch = 1
        # release the (large) checkpoint tensors before training starts
        del checkpoint['model']
        del checkpoint['optim']
        del checkpoint
    else:
        itr_progress = None
        print('Initializing model parameters')
        init_model_parameters(model, opt)
        resume = False
        start_epoch = 1
    if opt.load_encoder_from:
        self.load_encoder_weight(opt.load_encoder_from)
    if opt.load_decoder_from:
        self.load_decoder_weight(opt.load_decoder_from)
    # if we are on a GPU: warm up the memory allocator
    self.start_time = time.time()
    if self.cuda:
        self.warm_up()
    valid_loss = self.eval(self.valid_data)
    valid_ppl = math.exp(min(valid_loss, 100))
    print('Validation perplexity: %g' % valid_ppl)
    # valid_loss = self.train_epoch(0)
    # valid_ppl = math.exp(min(valid_loss, 100))
    #
    # print('Validation perplexity: %g' % valid_ppl)
    for epoch in range(start_epoch, start_epoch + opt.epochs):
        print('')
        # (1) train for one epoch on the training set
        train_loss = self.train_epoch(epoch, resume=resume, itr_progress=itr_progress)
        train_ppl = math.exp(min(train_loss, 100))
        print('Train perplexity: %g' % train_ppl)
        # (2) evaluate on the validation set
        valid_loss = self.eval(self.valid_data)
        valid_ppl = math.exp(min(valid_loss, 100))
        print('Validation perplexity: %g' % valid_ppl)
        self.save(epoch, valid_ppl)
        # resume state only applies to the first epoch after restore
        itr_progress = None
        resume = False
| 39,445 | 36.675263 | 121 | py |
NMTGMinor | NMTGMinor-master/onmt/train_utils/evaluator.py | from __future__ import division
import sys, tempfile
import onmt
import onmt.modules
#~ from onmt.metrics.gleu import sentence_gleu
#~ from onmt.metrics.sbleu import sentence_bleu
from onmt.metrics.bleu import moses_multi_bleu
#~ from onmt.utils import compute_score
import torch
import torch.nn as nn
from torch import cuda
from torch.autograd import Variable
import math
class Evaluator(object):
    """Holds a model + dataset pair and computes evaluation metrics.

    Only perplexity evaluation is currently active; the BLEU/reinforce code
    that used to live here is commented out below.
    """

    def __init__(self, model, dataset, opt, cuda=False):
        # some properties
        self.dataset = dataset
        self.dicts = dataset['dicts']
        self.setIDs = dataset['dicts']['setIDs']
        self.model = model
        self.cuda = cuda
        # self.translator = onmt.InplaceTranslator(self.model, self.dicts,
        #                                          beam_size=1,
        #                                          cuda=self.cuda)

    def setScore(self, score):
        self.score = score

    def setCriterion(self, criterion):
        self.criterion = criterion

    # Compute perplexity of a data given the model
    # For a multilingual dataset, we may need the setIDs of the desired languages
    # data is a dictionary with key = setid and value = DataSet object
    def eval_perplexity(self, data, loss_function):
        """Return the average loss per non-pad target token on `data`.

        :param data: indexable collection of batches; batch[1] holds targets.
        :param loss_function: callable(outputs, targets) -> scalar loss.
        """
        total_loss = 0
        total_words = 0
        self.model.eval()
        with torch.no_grad():
            for i in range(len(data)):
                batch = data[i]
                # NOTE(review): the original referenced an undefined `model`
                # and an unassigned `loss` (both NameErrors at runtime) and
                # ignored the `loss_function` argument; fixed to use
                # self.model and the provided loss_function — confirm the
                # loss_function signature against callers.
                outputs = self.model(batch)
                # exclude <s> from targets
                targets = batch[1][1:]
                loss = loss_function(outputs, targets)
                total_loss += loss
                total_words += targets.data.ne(onmt.constants.PAD).sum()
        self.model.train()
        return total_loss / total_words
#~ def eval_reinforce(self, data, score, verbose=False):
#~
#~ total_score = 0
#~ total_sentences = 0
#~
#~ total_hit = 0
#~ total_hit_sentences = 0
#~ total_gleu = 0
#~
#~ model = self.model
#~ model.eval()
#~ tgtDict = self.dicts['tgt']
#~ srcDict = self.dicts['src']
#~
#~ for i in range(len(data)):
#~ batch = data[i][:-1]
#~ src = batch[0]
#~ ref = batch[1][1:]
#~ # we need to sample
#~ sampled_sequence = model.sample(src, max_length=100, argmax=True)
#~ batch_size = ref.size(1)
#~
#~ for idx in xrange(batch_size):
#~
#~ tgtIds = sampled_sequence.data[:,idx]
#~
#~ tgtWords = tgtDict.convertTensorToLabels(tgtIds, onmt.Constants.EOS)
#~
#~ refIds = ref.data[:,idx]
#~
#~ refWords = tgtDict.convertTensorToLabels(refIds, onmt.Constants.EOS)
#~
#~ # return a single score value
#~ s = score(refWords, tgtWords)
#~
#~ if len(s) > 2:
#~ gleu = s[1]
#~ hit = s[2]
#~
#~ if hit >= 0:
#~ total_hit_sentences += 1
#~ total_hit += hit
#~
#~ if verbose:
#~ sampledSent = " ".join(tgtWords)
#~ refSent = " ".join(refWords)
#~
#~ if s[0] > 0:
#~ print "SAMPLE :", sampledSent
#~ print " REF :", refSent
#~ print "Score =", s
#~
#~ # bleu is scaled by 100, probably because improvement by .01 is hard ?
#~ total_score += s[0] * 100
#~
#~ total_sentences += batch_size
#~
#~ if total_hit_sentences > 0:
#~ average_hit = total_hit / total_hit_sentences
#~ print("Average HIT : %.2f" % (average_hit * 100))
#~
#~ average_score = total_score / total_sentences
#~ model.train()
#~ return average_score
# Compute translation quality of a data given the model
# def eval_translate(self, data, beam_size=1, batch_size=16, bpe=True, bpe_token="@"):
# model = self.model
# setIDs = self.setIDs
# count = 0
# one score for each language pair
# bleu_scores = dict()
# for sid in data: # sid = setid
# if self.adapt:
# if sid != self.adapt_pair:
# continue
# dset = data[sid]
# model.switchLangID(setIDs[sid][0], setIDs[sid][1])
# model.switchPairID(sid)
# tgt_lang = self.dicts['tgtLangs'][setIDs[sid][1]]
# src_lang = self.dicts['srcLangs'][setIDs[sid][0]]
# tgt_dict = self.dicts['vocabs'][tgt_lang]
# src_dict = self.dicts['vocabs'][src_lang]
# we print translations into temp files
# outF = tempfile.NamedTemporaryFile()
# outRef = tempfile.NamedTemporaryFile()
# for i in range(len(dset)):
# exclude original indices
# batch = dset[i][:-1]
# src = batch[0]
# exclude <s> from targets
# targets = batch[1][1:]
# transposed_targets = targets.data.transpose(0, 1) # bsize x nwords
# pred = self.translator.translate(src)
# bpe_string = bpe_token + bpe_token + " "
# for b in range(len(pred)):
# ref_tensor = transposed_targets[b].tolist()
# decodedSent = tgt_dict.convertToLabels(pred[b], onmt.Constants.EOS)
# decodedSent = " ".join(decodedSent)
# decodedSent = decodedSent.replace(bpe_string, '')
# refSent = tgt_dict.convertToLabels(ref_tensor, onmt.Constants.EOS)
# refSent = " ".join(refSent)
# refSent = refSent.replace(bpe_string, '')
# Flush the pred and reference sentences to temp files
# outF.write(decodedSent + "\n")
# outF.flush()
# outRef.write(refSent + "\n")
# outRef.flush()
# compute bleu using external script
# bleu = moses_multi_bleu(outF.name, outRef.name)
# outF.close()
# outRef.close()
# bleu_scores[sid] = bleu
# after decoding, switch model back to training mode
# self.model.train()
# return bleu_scores | 7,248 | 34.18932 | 95 | py |
NMTGMinor | NMTGMinor-master/onmt/train_utils/gem_trainer.py | from __future__ import division
import datetime
import gc
import math
import os
import re
import time
import torch
import copy
import sys
import contextlib
import numpy as np
import onmt
import onmt.markdown
import onmt.modules
from onmt.data.data_iterator import DataIterator
from onmt.data.multidata_iterator import MultiDataIterator
from onmt.data.dataset import rewrap
from onmt.model_factory import build_model, build_language_model, optimize_model
from onmt.model_factory import init_model_parameters
from onmt.modules.loss import NMTLossFunc, NMTAndCTCLossFunc
from onmt.train_utils.stats import Logger
from onmt.utils import checkpoint_paths, normalize_gradients, clip_grad_norm
from onmt.model_factory import build_model, optimize_model, init_model_parameters
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP_model
from torch.cuda.amp import autocast
import warnings
from onmt.constants import add_tokenidx
import dill
# ignore the pytorch -> numpy conversion warnings
warnings.filterwarnings("ignore", category=UserWarning)
import quadprog
from .mp_trainer import prepare_sample, generate_data_iterator, zero_tensor, Trainer
def store_grad(pp, grads, grad_dims, tid):
    """
    This stores parameter gradients of past tasks.
    pp: parameters
    grads: gradients
    grad_dims: list with number of parameters per layers
    tid: task id
    """
    # store the gradients
    grads[:, tid].fill_(0.0)
    cnt = 0
    for param in pp:
        if param.grad is not None:
            beg = 0 if cnt == 0 else sum(grad_dims[:cnt])
            en = sum(grad_dims[:cnt + 1])
            grads[beg: en, tid].copy_(param.grad.data.view(-1))
        # Advance the slot index even when grad is None so that later
        # parameters stay aligned with their entries in grad_dims
        # (matches the reference GEM implementation; the original
        # incremented only inside the if, misaligning all later slots).
        cnt += 1
def overwrite_grad(pp, newgrad, grad_dims):
    """
    This is used to overwrite the gradients with a new gradient
    vector, whenever violations occur.
    pp: parameters
    newgrad: corrected gradient
    grad_dims: list storing number of parameters at each layer
    """
    cnt = 0
    for param in pp:
        if param.grad is not None:
            beg = 0 if cnt == 0 else sum(grad_dims[:cnt])
            en = sum(grad_dims[:cnt + 1])
            this_grad = newgrad[beg: en].contiguous().view(
                param.grad.data.size())
            param.grad.data.copy_(this_grad)
        # Advance the slot index even when grad is None so that later
        # parameters read from their own slices of newgrad (matches the
        # reference GEM implementation; the original incremented only
        # inside the if, misaligning all later slots).
        cnt += 1
def project2cone2(gradient, memories, margin=0.5, eps=1e-3):
    """
    Solves the GEM dual QP described in the paper given a proposed
    gradient "gradient", and a memory of task gradients "memories".
    Overwrites "gradient" with the final projected update.
    input: gradient, p-vector
    input: memories, (t * p)-vector
    output: x, p-vector
    """
    mem_np = memories.cpu().t().double().numpy()
    grad_np = gradient.cpu().contiguous().view(-1).double().numpy()
    n_tasks = mem_np.shape[0]
    # Gram matrix of the memory gradients, symmetrized and regularized
    # with eps on the diagonal for numerical stability.
    gram = np.dot(mem_np, mem_np.transpose())
    gram = 0.5 * (gram + gram.transpose()) + np.eye(n_tasks) * eps
    lin = np.dot(mem_np, grad_np) * -1
    ineq = np.eye(n_tasks)
    bounds = np.zeros(n_tasks) + margin
    # Solve the dual QP; the first return value is the dual solution v*.
    v_star = quadprog.solve_qp(gram, lin, ineq, bounds)[0]
    projected = np.dot(v_star, mem_np) + grad_np
    gradient.copy_(torch.Tensor(projected).to(gradient.device).view(-1, 1))
def is_factorized_param(p):
    """Return True if parameter name `p` denotes a factorized weight.

    Factorized parameters are identified purely by their name suffix
    (the r*/s* rank/scale vectors used by the factorized modules).

    :param p: parameter name, e.g. "decoder.layer.0.attn.r_q".
    :return: bool
    """
    # str.endswith accepts a tuple of suffixes: one call replaces the
    # original chain of ten if-statements with identical behavior.
    return p.endswith((
        "r_i", "s_i",
        "rm_i", "rm_o",
        "sm_i", "sm_o",
        "r_o", "s_o",
        "r_p", "s_p",
        "rm_p", "sm_p",
        "r_q", "s_q", "r_kv", "s_kv",
        "rm_q", "sm_q", "rm_kv", "sm_kv",
    ))
class GEMTrainer(Trainer):
def __init__(self, device, train_data, valid_data, dicts, opt, constants=None, setup_optimizer=True):
    """GEM trainer: keeps one gradient-memory slot per task "order".

    :param device: int (GPU id)
    :param train_data: list of training datasets (one per task)
    :param valid_data: list of validation datasets
    :param dicts: vocabularies
    :param opt: option namespace; must provide train_sets and
        train_set_orders of equal length (order 0 = the newest task)
    :param constants: optional constants forwarded to the base trainer
    :param setup_optimizer: forwarded to the base trainer
    """
    super(GEMTrainer, self).__init__(device, train_data, valid_data, dicts, opt,
                                     constants=constants, setup_optimizer=setup_optimizer)
    assert isinstance(train_data, list)
    assert isinstance(valid_data, list)
    assert (len(opt.train_sets) > 0)
    assert (len(opt.train_set_orders) > 0)
    assert (len(opt.train_set_orders) == len(opt.train_sets)), "The number of train sets and the number of orders must match"
    self.print("[INFO] Preparing parameters for Gradient Episodic Memory")
    # GEM operates only on non-factorized, trainable parameters;
    # factorized (adapter) parameters are tracked separately by name.
    self.gem_params = list()
    self.gem_param_names = list()
    self.gem_param_size = list()
    self.ft_params = list()
    for n, p in self.model.named_parameters():
        if is_factorized_param(n):
            self.ft_params.append(n)
        else:
            if p.requires_grad:
                self.gem_params.append(p)
                self.gem_param_names.append(n)
                self.gem_param_size.append(p.numel())
    self.print("[INFO] Done Preparing parameters.")
    # print out the stuff
    # for (gem_param, gem_param_name, gem_param_size) in zip(self.gem_params, self.gem_param_names, self.gem_param_size):
    #     print(gem_param_name, gem_param_size)
    # exit()
    # group the dataset ids by their order (task id)
    self.orders = dict()
    for order, train_set in zip(opt.train_set_orders, opt.train_sets):
        if order not in self.orders:
            self.orders[order] = list()
        self.orders[order].append(train_set)
    memory_size = len(self.orders)
    # one flattened-gradient column per order: (total_params x n_orders)
    self.grads = torch.Tensor(sum(self.gem_param_size), memory_size).cuda()
def eval(self, data):
    """Distributed cross-entropy evaluation over `data`.

    :param data: validation dataset(s), iterated with opt.valid_sets ids.
    :return: (loss per target word, accuracy per target word), reduced
        across all processes in self.group.
    """
    self.print("[INFO] Running cross-entropy evaluation...", flush=True)
    opt = self.opt
    rank = self.rank
    world_size = self.world_size
    # the data iterator creates an epoch iterator
    data_iterator = generate_data_iterator(data, rank, world_size, seed=self.opt.seed,
                                           num_workers=1, epoch=1, buffer_size=opt.buffer_size, split_even=False,
                                           dataset_ids=opt.valid_sets)
    epoch_iterator = data_iterator.next_epoch_itr(False, pin_memory=False)
    data_size = len(data_iterator)
    i = 0
    self.model.eval()
    self.loss_function.eval()
    if opt.load_pretrained_classifier:
        self.classifier.eval()
    total_loss = zero_tensor()
    total_words = zero_tensor()
    total_correct = zero_tensor()
    if opt.streaming:
        streaming_state = self.model.init_stream()
    else:
        streaming_state = None
    with torch.no_grad():
        # while not data_iterator.end_of_epoch():
        while i < len(epoch_iterator):
            samples = next(epoch_iterator)

            def maybe_no_sync():
                # skip DDP gradient sync during eval (no gradients anyway)
                if isinstance(self.model, DDP_model):
                    return self.model.no_sync()
                else:
                    return contextlib.ExitStack()  # dummy contextmanager

            if samples:
                with maybe_no_sync():
                    with autocast(enabled=opt.fp16):
                        batch = prepare_sample(samples, device=self.device)
                        targets = batch.get('target_output')
                        tgt_mask = targets.ne(onmt.constants.PAD)
                        if opt.load_pretrained_classifier:
                            layer_states = self.classifier.encode(batch)
                        else:
                            layer_states = None
                        outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
                                             mirror=opt.mirror_loss, streaming_state=streaming_state, nce=opt.nce,
                                             pretrained_layer_states=layer_states)
                        outputs['tgt_mask'] = tgt_mask
                        loss_dict = self.loss_function(outputs, targets, model=self.model, eval=True)
                        loss_data = loss_dict['data']
                        correct, total = loss_dict['correct'], loss_dict['total']
                        # if total != batch.tgt_size:
                        #     # print(batch.get('target').size())
                        #     # print(batch.get('target_output').size())
                        #     targets = batch.get('target_output')
                        #     targets_ = targets.view(-1)
                        #     non_pad_mask = torch.nonzero(targets_.ne(self.loss_function.padding_idx)).squeeze(1)
                        #     labels = targets_.index_select(0, non_pad_mask)
                        #     print(labels, labels.numel(), batch.tgt_size)
                        assert (total == batch.tgt_size), \
                            "Process %i, Minibatch %d/%d: Expected %d tokens from the batch, got %d" \
                            % (self.rank, i, data_size, batch.tgt_size, total)
                        # print(i, len(data_iterator), total, batch.tgt_size, loss_data)
                        total_loss.add_(loss_data)
                        total_words.add_(batch.tgt_size)
                        total_correct.add_(correct)
            i = i + 1
    # allreduce the total loss and total words from other processes
    self.all_reduce(total_loss, op=dist.ReduceOp.SUM, group=self.group)
    self.all_reduce(total_words, op=dist.ReduceOp.SUM, group=self.group)
    self.all_reduce(total_correct, op=dist.ReduceOp.SUM, group=self.group)
    self.model.train()
    self.loss_function.train()
    if opt.load_pretrained_classifier:
        self.classifier.train()
    return total_loss.item() / total_words.item(), total_correct.item() / total_words.item()
def train_epoch(self, epoch, resume=False, itr_progress=None):
    """Train one epoch with GEM-style (Gradient Episodic Memory) projection.

    For every update, one minibatch per memory (previously learned) dataset is
    forwarded/backwarded first and its gradient stored in its own column of
    ``self.grads``; the current-task gradient (column 0) is then projected with
    ``project2cone2`` whenever its dot product with any memory gradient is
    negative, so an update never increases the loss on past tasks.

    Bug fixes vs. previous revision: inside the memory loop the stale loop
    variable ``order`` (left over from the iterator-setup loops, i.e. frozen at
    the last key) was used both to restart an exhausted iterator and as the
    column index for ``store_grad`` — so the wrong iterator was restarted and
    every memory gradient overwrote one single column. Both now use ``t``.

    :param epoch: 1-based epoch index (seeds the data iterators, used in logs)
    :param resume: kept for interface compatibility; currently unused here
    :param itr_progress: kept for interface compatibility; currently unused here
    :return: 1-element tensor with the average training loss per target token
    """
    opt = self.opt
    train_data = self.train_data
    streaming = opt.streaming
    grad_norm = -1
    memory_size = len(self.orders)

    # Clear the gradients of the model
    self.optim.zero_grad(set_to_none=opt.true_zero_grad)

    # note: for Training split_even=True
    dataset = train_data
    data_iterators = dict()

    for order in self.orders:
        # self.orders[order] contains the list of training datasets for order
        # [0] is by default the currently (newest) added datasets
        data_iterators[order] = generate_data_iterator(dataset, self.rank, self.world_size,
                                                       seed=self.opt.seed, num_workers=opt.num_workers,
                                                       epoch=epoch, buffer_size=opt.buffer_size, split_even=True,
                                                       dataset_ids=self.orders[order])

    data_iterator = data_iterators[0]

    epoch_iterators = dict()
    for order in self.orders:
        epoch_iterators[order] = data_iterators[order].next_epoch_itr(not streaming, pin_memory=opt.pin_memory)

    epoch_iterator = epoch_iterators[0]

    total_tokens, total_loss, total_words = zero_tensor(), zero_tensor(), zero_tensor()
    total_non_pads = zero_tensor()
    report_loss, report_tgt_words = zero_tensor(), zero_tensor()
    report_ctc_loss = zero_tensor()
    report_src_words = zero_tensor()
    report_sents = zero_tensor()
    report_rec_loss, report_rev_loss, report_mirror_loss = zero_tensor(), zero_tensor(), zero_tensor()
    start = time.time()
    n_samples = len(data_iterator)

    counter = 0
    num_accumulated_words = zero_tensor()
    num_accumulated_sents = zero_tensor()
    report_contrastive_loss = zero_tensor()
    streaming_state = None

    # each process only sees 1/world_size of the minibatches, hence the scaling
    i = data_iterator.iterations_in_epoch if not isinstance(train_data, list) else epoch_iterator.n_yielded
    i = i * self.world_size

    while not data_iterator.end_of_epoch():

        self.grads.zero_()

        # Step 1: compute and store one gradient per memory dataset.
        for t in self.orders:
            self.optim.zero_grad(set_to_none=opt.true_zero_grad)
            if t == 0:
                # column 0 is reserved for the current task (filled at update time)
                continue

            memory_data_iterator = epoch_iterators[t]

            if not memory_data_iterator.has_next():
                # memory iterator exhausted: restart it
                # (fix: was data_iterators[order] with a stale loop variable)
                epoch_iterators[t] = data_iterators[t].next_epoch_itr(not streaming, pin_memory=opt.pin_memory)
                memory_data_iterator = epoch_iterators[t]

            prev_samples = next(memory_data_iterator)

            batch = prepare_sample(prev_samples, device=self.device)
            targets = batch.get('target_output')
            streaming_state = None

            with autocast(enabled=opt.fp16):
                tgt_mask = targets.ne(onmt.constants.PAD)
                if opt.load_pretrained_classifier:
                    with torch.no_grad():
                        layer_states = self.classifier.encode(batch)
                else:
                    layer_states = None

                outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
                                     zero_encoder=opt.zero_encoder,
                                     mirror=opt.mirror_loss, streaming_state=streaming_state,
                                     nce=opt.nce, pretrained_layer_states=layer_states,
                                     adv_ptb_grad=opt.virtual_adversarial_training_mode > 0,
                                     checkpointing_ffn=opt.checkpointing_ffn,
                                     checkpointing_cross_attn=opt.checkpointing_cross_attn,
                                     checkpointing_self_attn=opt.checkpointing_self_attn
                                     )

                outputs['tgt_mask'] = tgt_mask
                loss_dict = self.loss_function(outputs, targets, model=self.model)
                loss_data = loss_dict['data']
                loss = loss_dict['loss']  # a little trick to avoid gradient overflow with fp16
                full_loss = loss

                rev_loss_data = None
                mirror_loss_data = 0
                rec_loss_data = None
                correct, total = loss_dict['correct'], loss_dict['total']

            optimizer = self.optim.optimizer

            # backward to get gradients (and synchronize between gpus);
            # unscale before storing so the stored gradient is in true scale
            self.grad_scaler.scale(full_loss).backward()
            self.grad_scaler.unscale_(self.optim.optimizer)
            # fix: store into column t (was a stale `order`, which made all
            # memory gradients overwrite one single column)
            store_grad(self.gem_params, self.grads, self.gem_param_size, t)
            # fake step keeps the scaler's bookkeeping consistent without
            # touching the parameters
            self.optim.optimizer.step(fake=True)
            self.grad_scaler.update()
            self.optim.zero_grad(set_to_none=opt.true_zero_grad)

        # Step 2: forward/backward on the current-task minibatch.
        samples = next(epoch_iterator)

        batch = prepare_sample(samples, device=self.device)
        targets = batch.get('target_output')
        streaming_state = None

        oom = zero_tensor()
        counter = counter + 1
        reduce = True if counter >= opt.update_frequency or i == (n_samples - 1) else False

        try:
            def maybe_no_sync():
                # when we dont reach the updating step, we do not need to synchronize the gradients
                # thus disabling the backward grad sync to improve speed
                if not reduce and isinstance(self.model, DDP_model):
                    return self.model.no_sync()
                else:
                    return contextlib.ExitStack()  # dummy contextmanager

            with maybe_no_sync():
                with autocast(enabled=opt.fp16):
                    tgt_mask = targets.ne(onmt.constants.PAD)
                    if opt.load_pretrained_classifier:
                        with torch.no_grad():
                            layer_states = self.classifier.encode(batch)
                    else:
                        layer_states = None

                    outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
                                         zero_encoder=opt.zero_encoder,
                                         mirror=opt.mirror_loss, streaming_state=streaming_state,
                                         nce=opt.nce, pretrained_layer_states=layer_states,
                                         adv_ptb_grad=opt.virtual_adversarial_training_mode > 0,
                                         checkpointing_ffn=opt.checkpointing_ffn,
                                         checkpointing_cross_attn=opt.checkpointing_cross_attn,
                                         checkpointing_self_attn=opt.checkpointing_self_attn
                                         )

                    batch_size = batch.size
                    # outputs is a dictionary containing keys/values necessary for loss function
                    # can be flexibly controlled within models for easier extensibility
                    outputs['tgt_mask'] = tgt_mask
                    loss_dict = self.loss_function(outputs, targets, model=self.model)
                    loss_data = loss_dict['data']
                    loss = loss_dict['loss']  # a little trick to avoid gradient overflow with fp16
                    full_loss = loss

                    if opt.ctc_loss > 0.0:
                        ctc_loss = self.ctc_loss_function(outputs, targets)
                        ctc_loss_data = ctc_loss.item()
                        full_loss = full_loss + opt.ctc_loss * ctc_loss

                    rev_loss_data = None
                    mirror_loss_data = 0
                    rec_loss_data = None
                    correct, total = loss_dict['correct'], loss_dict['total']

                optimizer = self.optim.optimizer
                grad_list = [p for p in self.model.parameters() if p.requires_grad]
                model_input = None
                vanilla_logits = None

                # grad scaler has to be done outside of the autocast
                self.grad_scaler.scale(full_loss).backward(inputs=grad_list)
        except RuntimeError as e:
            if 'out of memory' in str(e):
                print('[WARNING]: ran out of memory on GPU %d' % self.rank, flush=True)
                print('Input size at OOM position:', batch.get('source').size(),
                      batch.get('target').size())
                raise e
            loss = 0

        batch_size = batch.size
        src_size = batch.src_size
        tgt_size = batch.tgt_size
        num_accumulated_words.add_(tgt_size)
        num_accumulated_sents.add_(batch_size)

        # We only update the parameters after getting gradients from n mini-batches
        update_flag = reduce

        if update_flag:
            # accumulated gradient case, in this case the update frequency
            self.all_reduce(num_accumulated_words, op=dist.ReduceOp.SUM, group=self.group)
            grad_denom = 1.0
            self.grad_scaler.unscale_(self.optim.optimizer)

            if self.opt.normalize_gradient:
                grad_denom = num_accumulated_words.item() * grad_denom

            # When we accumulate the gradients, each gradient is already normalized by a constant grad_scaler
            if grad_denom != 1:
                normalize_gradients(self.model.parameters(), grad_denom)

            # Step 3: GEM projection of the current-task gradient against memory.
            with torch.no_grad():
                t = 0
                store_grad(self.gem_params, self.grads, self.gem_param_size, t)
                indx = torch.arange(1, len(self.orders), device=self.gem_params[0].device)
                dotp = torch.mm(self.grads[:, 0].unsqueeze(0),
                                self.grads.index_select(1, indx))
                self.margin = 0.5
                if (dotp < 0).sum() != 0:
                    # at least one memory task would be hurt by this step -> project
                    project2cone2(self.grads[:, t].unsqueeze(1),
                                  self.grads.index_select(1, indx), self.margin)
                    overwrite_grad(self.gem_params, self.grads[:, t],
                                   self.gem_param_size)

            self.optim.step(scaler=self.grad_scaler)
            self.grad_scaler.update()
            self.optim.zero_grad(set_to_none=opt.true_zero_grad)
            counter = 0
            num_accumulated_words.zero_()
            num_accumulated_sents.zero_()

            num_updates = self.optim._step
            if (opt.save_every > 0 and num_updates % opt.save_every == -1 % opt.save_every) \
                    or (num_updates >= opt.max_step):
                valid_loss, valid_accuracy = self.eval(self.valid_data)
                valid_ppl = math.exp(min(valid_loss, 100))

                if self.is_main():
                    print('Validation perplexity: %g' % valid_ppl)
                    print('Validation accuracy: %g percent' % (100 * valid_accuracy))
                    ep = float(epoch) - 1. + ((float(i) + 1.) / n_samples)
                    self.save(ep, valid_ppl if opt.save_metrics in ['ppl', 'perplexity'] else 1 - valid_accuracy,
                              itr=data_iterator)

                if num_updates >= opt.max_step:
                    print('[INFO] Max-training-step reached.')
                    exit(0)

        num_words = tgt_size
        report_loss.add_(loss_data)
        report_tgt_words.add_(num_words)
        report_src_words.add_(src_size)
        total_loss.add_(loss_data)
        total_words.add_(num_words)
        report_sents.add_(1)

        if opt.reconstruct:
            report_rec_loss.add_(rec_loss_data)

        if opt.mirror_loss:
            report_rev_loss.add_(rev_loss_data)
            report_mirror_loss.add_(mirror_loss_data)

        if opt.ctc_loss > 0.0:
            report_ctc_loss.add_(ctc_loss_data)

        # control the index a little bit to ensure the log is always printed
        if i == 0 or ((i + 1) % opt.log_interval < self.world_size):
            self.all_reduce(report_loss, op=dist.ReduceOp.SUM, group=self.group)
            self.all_reduce(report_tgt_words, op=dist.ReduceOp.SUM, group=self.group)
            self.all_reduce(report_src_words, op=dist.ReduceOp.SUM, group=self.group)

            if self.is_main():
                log_string = ("Epoch %2d, %5d/%5d; ; ppl: %6.2f ; grad_norm: %6.4f " %
                              (epoch, i + 1, len(data_iterator),
                               math.exp(report_loss.item() / report_tgt_words.item()),
                               grad_norm))

                if opt.mirror_loss:
                    # NOTE(review): this all_reduce runs only on the main process,
                    # which would stall peers in multi-GPU mirror-loss runs — confirm
                    self.all_reduce(report_rev_loss, op=dist.ReduceOp.SUM, group=self.group)
                    rev_ppl = math.exp(report_rev_loss.item() / report_tgt_words.item())
                    log_string += (" rev_ppl: %6.2f ; " % rev_ppl)
                    # format scalar items (consistent with the other report lines)
                    log_string += (" mir_loss: %6.2f ; " % (report_mirror_loss.item() / report_tgt_words.item()))

                if opt.ctc_loss > 0.0:
                    ctc_loss = report_ctc_loss.item() / report_tgt_words.item()
                    log_string += (" ctcloss: %8.2f ; " % ctc_loss)

                if opt.contrastive_loss_coeff > 0.0:
                    ctv_loss = report_contrastive_loss.item() / report_tgt_words.item()
                    log_string += (" ctv_loss: %8.2f ; " % ctv_loss)

                log_string += ("lr: %.7f ; updates: %7d; " %
                               (self.optim.get_learning_rate(),
                                self.optim._step))

                log_string += ("%5.0f src tok/s; %5.0f tgt tok/s; " %
                               (report_src_words.item() / (time.time() - start),
                                report_tgt_words.item() / (time.time() - start)))

                log_string += ("%s elapsed" %
                               str(datetime.timedelta(seconds=int(time.time() - self.start_time))))

                self.print(log_string, flush=True)

            report_loss.zero_()
            report_tgt_words.zero_()
            report_src_words.zero_()
            report_rec_loss.zero_()
            report_rev_loss.zero_()
            report_mirror_loss.zero_()
            report_ctc_loss.zero_()

            if report_contrastive_loss is not None:
                report_contrastive_loss.zero_()

            start = time.time()

        # increase i by world size
        i = i + self.world_size

    return total_loss / total_words
def run(self, checkpoint=None):
    """Top-level training loop.

    Optionally resumes from a checkpoint, loads pretrained encoder/decoder
    weights, warms up the CUDA allocator, then alternates one training epoch
    with one validation pass, saving a checkpoint after each epoch.

    :param checkpoint: optional checkpoint dict used to pick the start epoch
    """
    opt = self.opt

    # Defaults: a fresh run starting from epoch 1 with no iterator state.
    itr_progress = None
    resume = False
    start_epoch = 1

    if checkpoint is not None:
        # TODO: have loading checkpoints for each process
        prec_opt = checkpoint['opt'] if 'opt' in checkpoint else None

        if not opt.reset_optim:
            resume = True
            start_epoch = math.floor(checkpoint['epoch']) + 1 if 'epoch' in checkpoint else 1
            if start_epoch is None:
                start_epoch = 1

        # free the (potentially large) checkpoint dict as early as possible
        del checkpoint

    if opt.load_encoder_from:
        self.load_encoder_weight(opt.load_encoder_from)
    if opt.load_decoder_from:
        self.load_decoder_weight(opt.load_decoder_from)

    # if we are on a GPU: warm up the memory allocator
    if self.cuda:
        self.warm_up()

    if opt.estimate_fisher_information:
        self.start_time = time.time()
        self.estimate_fisher(self.train_data)
        return

    if opt.run_validation_before_training or opt.max_step <= 0:
        valid_loss, valid_accuracy = self.eval(self.valid_data)
        valid_ppl = math.exp(min(valid_loss, 100))

        if self.is_main():
            print('[INFO] Validation perplexity: %g' % valid_ppl, flush=True)
            # percent is never used in plural :)
            print('[INFO] Validation accuracy: %g percent' % (100 * valid_accuracy))

        if opt.max_step <= 0:
            if self.is_main():
                self.save(0, valid_ppl if opt.save_metrics in ['ppl', 'perplexity'] else 1 - valid_accuracy)
            return

    self.start_time = time.time()

    for epoch in range(start_epoch, start_epoch + opt.epochs):
        self.print('')

        # (1) train for one epoch on the training set
        train_loss = self.train_epoch(epoch, resume=resume, itr_progress=itr_progress)
        train_ppl = math.exp(min(train_loss, 100))
        self.print('[INFO] Train perplexity: %g' % train_ppl)

        # (2) evaluate on the validation set
        valid_loss, valid_accuracy = self.eval(self.valid_data)
        valid_ppl = math.exp(min(valid_loss, 100))

        if self.is_main():
            print('[INFO] Validation perplexity: %g' % valid_ppl)
            print('[INFO] Validation accuracy: %g percent' % (100 * valid_accuracy))
            self.save(epoch, valid_ppl if opt.save_metrics in ['ppl', 'perplexity'] else 1 - valid_accuracy)

        # resume state only applies to the first epoch after loading
        itr_progress = None
        resume = False
| 30,109 | 40.077763 | 128 | py |
NMTGMinor | NMTGMinor-master/onmt/train_utils/mp_trainer.py | from __future__ import division
import datetime
import gc
import math
import os
import re
import time
import torch
import copy
import sys
import contextlib
import onmt
import onmt.markdown
import onmt.modules
from onmt.data.data_iterator import DataIterator
from onmt.data.multidata_iterator import MultiDataIterator
from onmt.data.dataset import rewrap
from onmt.model_factory import build_model, build_language_model, optimize_model
from onmt.model_factory import init_model_parameters
from onmt.modules.loss import NMTLossFunc, NMTAndCTCLossFunc
from onmt.train_utils.stats import Logger
from onmt.utils import checkpoint_paths, normalize_gradients, clip_grad_norm
from onmt.model_factory import build_model, optimize_model, init_model_parameters
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP_model
from torch.cuda.amp import autocast
import warnings
from onmt.constants import add_tokenidx
import dill
from multiprocessing.managers import ListProxy as ListProxy
# ignore the pytorch -> numpy conversion warnings
warnings.filterwarnings("ignore", category=UserWarning)
def prepare_sample(batch, device=None):
    """
    Put minibatch on the corresponding GPU
    :param batch: a Batch object (or a singleton list wrapping one)
    :param device: target CUDA device id
    :return: the rewrapped batch, resident on `device`
    """
    # collated samples may arrive wrapped in a one-element list
    sample = batch[0] if isinstance(batch, list) else batch
    sample = rewrap(sample)
    sample.cuda(fp16=False, device=device)
    return sample
def is_list(object):
    """Return True when *object* counts as a list of datasets.

    Both plain ``list`` instances and ``multiprocessing.managers.ListProxy``
    objects (lists shared across worker processes) qualify.

    :param object: any value
    :return: bool
    """
    # one isinstance call with a tuple replaces the original if/elif/return chain
    return isinstance(object, (list, ListProxy))
def generate_data_iterator(dataset, rank, world_size, seed,
                           num_workers=1, epoch=1., buffer_size=0, split_even=True,
                           dataset_ids=None):
    """Build the sharded data iterator appropriate for *dataset*.

    A list (or ListProxy) of datasets yields a MultiDataIterator; a single
    dataset yields a plain DataIterator. Either way the data is sharded over
    `world_size` processes, this process taking shard `rank`.
    """
    if is_list(dataset):
        # multiple datasets -> multi-dataset iterator
        return MultiDataIterator(dataset, seed=seed, num_workers=num_workers,
                                 epoch=epoch, buffer_size=buffer_size,
                                 num_shards=world_size, shard_id=rank, split_even=split_even,
                                 dataset_ids=dataset_ids)

    return DataIterator(dataset, dataset.get_collater(), dataset.get_batches(), seed=seed,
                        num_workers=num_workers, epoch=epoch, buffer_size=buffer_size,
                        num_shards=world_size, shard_id=rank, split_even=split_even)
def zero_tensor(device=None):
    """Create a single-element float tensor holding 0.

    :param device: target device; None means the process's current CUDA device
    :return: torch.Tensor of shape (1,)
    """
    base = torch.Tensor([0])
    return base.cuda() if device is None else base.to(device)
def all_reduce_and_rescale_tensors(tensors, rescale_denom=1,
                                   buffer_size=10485760):
    """All-reduce and rescale tensors in chunks of the specified size.

    Small tensors are packed together into a flat buffer so that one collective
    call covers many tensors; tensors larger than the buffer are reduced
    directly.

    Args:
        tensors: list of Tensors to all-reduce
        rescale_denom: denominator for rescaling summed Tensors
        buffer_size: all-reduce chunk size in bytes
    """
    # buffer size in bytes, determine equiv. # of elements based on data type
    buffer_t = tensors[0].new(
        math.ceil(buffer_size / tensors[0].element_size())).zero_()
    buffer = []

    def all_reduce_buffer():
        # copy tensors into buffer_t
        offset = 0
        for t in buffer:
            numel = t.numel()
            buffer_t[offset:offset + numel].copy_(t.view(-1))
            offset += numel

        # all-reduce and rescale
        torch.distributed.all_reduce(buffer_t[:offset])
        buffer_t.div_(rescale_denom)

        # copy all-reduced buffer back into tensors
        offset = 0
        for t in buffer:
            numel = t.numel()
            t.view(-1).copy_(buffer_t[offset:offset + numel])
            offset += numel

    with torch.no_grad():
        filled = 0
        for t in tensors:
            sz = t.numel() * t.element_size()
            if sz > buffer_size:
                # tensor is bigger than buffer, all-reduce and rescale directly
                torch.distributed.all_reduce(t)
                t.div_(rescale_denom)
            elif filled + sz > buffer_size:
                # buffer is full, all-reduce and replace buffer with grad
                all_reduce_buffer()
                buffer = [t]
                filled = sz
            else:
                # add tensor to buffer
                buffer.append(t)
                filled += sz

        # flush whatever is left in the buffer
        if len(buffer) > 0:
            all_reduce_buffer()
class Trainer(object):
# def __init__(self, device, train_data, valid_data, dicts, opt, constants=None, setup_optimizer=True):
def __init__(self, device, dicts, opt, constants=None, setup_optimizer=True):
    """Build the model, loss function(s) and optimizer for one training process.

    One Trainer instance lives in each (multiprocessing) worker; `device` doubles
    as the distributed rank in this single-node setup.

    :param device: int GPU id for this process (also used as the process rank)
    :param dicts: vocabulary/language dictionaries built by preprocessing
    :param opt: parsed training options (mutated: node_rank/nodes are set here)
    :param constants: optional dill-serialized onmt constants to restore
    :param setup_optimizer: when False, skip optimizer creation (e.g. eval-only)
    """
    self.device = device
    opt.node_rank = 0
    opt.nodes = 1
    self.world_size = len(opt.gpus)

    # constants are pickled with dill so lambdas survive the process boundary
    self.constants = dill.loads(constants) if constants is not None else None

    # in the case of single node distributed, it should equal self.device
    self.rank = self.device

    # make a group to later use with self.all_reduce
    self.group = dist.group.WORLD

    self.print("[INFO] Training Options:", opt)
    if self.world_size > 1:
        dist.init_process_group(backend='nccl', init_method='env://', world_size=self.world_size, rank=self.rank)

    self.model = None

    self.dicts = dicts
    self.opt = opt
    self.cuda = (len(opt.gpus) >= 1 and opt.gpus[0] >= 0)

    if self.cuda:
        torch.cuda.set_device(self.device)

    # NOTE(review): assert is stripped under `python -O`; an explicit raise
    # would be more robust for input validation
    assert self.cuda, "[ERROR] Training is only available on GPUs."

    self.start_time = 0

    torch.manual_seed(self.opt.seed)

    # note: we must start creating models after ccreating the processes
    # for some reason passing a pre-created model to a process creates a "pickle" error
    if self.is_main():
        print("[INFO] Building models .... ", flush=True)
        print("Languages: ", dicts['langs'], flush=True)
    model = build_model(opt, dicts, False, self.constants)

    """ Building the loss function """
    tgt_pad = dicts['tgt_pad']

    # optional auxiliary losses, each only constructed when its option is on
    if opt.ctc_loss > 0.0:
        from onmt.speech.ctc_loss import CTC
        self.ctc_loss_function = CTC(dicts['tgt'].size(), opt.model_size, 0.0, reduce=True,
                                     padding_idx=tgt_pad, blank_idx=0)

    if opt.predict_language:
        from onmt.models.speech_recognizer.lid_loss import CrossEntropyLIDLoss
        self.lid_loss_function = CrossEntropyLIDLoss(opt.n_languages, label_smoothing=0.0)

    if opt.nce:
        from onmt.modules.nce.nce_loss import NCELoss
        loss_function = NCELoss(opt.model_size, dicts['tgt'].size(), noise_ratio=opt.nce_noise,
                                logz=9, label_smoothing=opt.label_smoothing)
    else:
        loss_function = NMTLossFunc(opt.model_size, dicts['tgt'].size(),
                                    label_smoothing=opt.label_smoothing,
                                    mirror=opt.mirror_loss,
                                    padding_idx=tgt_pad)

    # This function replaces modules with the more optimized counterparts so that it can run faster
    # Currently exp with LayerNorm
    # distributed is required to convert BatchNorm to SyncBatchNorm for DDP
    optimize_model(model, distributed=(self.world_size > 1))

    if opt.load_pretrained_classifier:
        from onmt.model_factory import build_classifier
        self.print("Loading pretrained external classifier ...", flush=True)
        classifier_checkpoint = torch.load(opt.load_pretrained_classifier,
                                           map_location=lambda storage, loc: storage)
        classifier_opt = classifier_checkpoint['opt']
        classifier_dicts = classifier_checkpoint['dicts']
        self.classifier = build_classifier(classifier_opt, classifier_dicts)
        self.classifier.load_state_dict(classifier_checkpoint['model'])

    init_model_parameters(model, opt)
    self.model = model
    self.loss_function = loss_function
    self.grad_scaler = torch.cuda.amp.GradScaler()

    if opt.load_from:
        checkpoint = torch.load(opt.load_from, map_location=lambda storage, loc: storage)
        try:
            self.model.load_state_dict(checkpoint['model'])
        except RuntimeError as e:
            # NOTE(review): this retry repeats the call with strict=True (the
            # default), so it re-raises identically — likely strict=False was
            # intended to allow a partial load; confirm before changing
            self.model.load_state_dict(checkpoint['model'], strict=True)

        # if 'scaler' in checkpoint and checkpoint['scaler'] is not None:
        #     self.grad_scaler.load_state_dict(checkpoint['scaler'])

    if self.cuda:
        self.loss_function = self.loss_function.cuda(device=self.device)
        self.model = self.model.cuda(device=self.device)
        if opt.ctc_loss > 0.0:
            self.ctc_loss_function = self.ctc_loss_function.cuda(device=self.device)
        if opt.load_pretrained_classifier:
            self.classifier = self.classifier.cuda(device=self.device)

    # Ensure that the distributed copies have the same initial parameters
    # Manual seed may not work the same for different GPU models.
    # if self.world_size > 1:
    #     params = [p for p in self.model.parameters()]
    #
    #     with torch.no_grad():
    #         if not self.is_main():
    #             # zero everything except for the main model
    #             for p in params:
    #                 p.zero_()
    #         else:
    #             for p in params:
    #                 p.add_(0)

    # run all_reduce to ensure that all models have exactly the same parameters
    # if self.world_size > 1:
    #     params = [p for p in self.model.parameters()]
    #     all_reduce_and_rescale_tensors(params, 1)

    if setup_optimizer:

        self.optim = onmt.Optim(opt)
        self.optim.set_parameters(self.model.parameters())

        if self.is_main():
            print("[INFO] Optimizer: ", self.optim.optimizer)

        if opt.load_from and not opt.reset_optim:
            if 'optim' in checkpoint and checkpoint['optim'] is not None and not opt.reset_optim:
                self.optim.load_state_dict(checkpoint['optim'])

        if opt.starting_step > 0:
            print("[INFO] Optimizer starting from state %d " % opt.starting_step)
            self.optim.set_starting_step(opt.starting_step)

    if self.world_size > 1:
        find_unused_parameters = opt.find_unused_parameters

        self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[self.rank],
                                                               output_device=self.rank,
                                                               find_unused_parameters=find_unused_parameters)

    if self.is_main():
        nparams = sum(p.numel() for p in model.parameters() if p.requires_grad)
        print("[INFO] Total number of trainable paramaters: %d" % nparams)
        nparams = sum(p.numel() for p in model.parameters())
        print("[INFO] Total number of paramaters: %d" % nparams)

    # optional Fisher information (for EWC-style regularization), moved to GPU
    if opt.load_fisher:
        if self.is_main():
            print("[INFO] Loading fisher information from: %s" % opt.load_fisher)
        self.fisher_info = torch.load(opt.load_fisher, map_location=lambda storage, loc: storage)
        if self.cuda:
            for n in self.fisher_info['mean']:
                self.fisher_info['mean'][n] = self.fisher_info['mean'][n].cuda()
            for n in self.fisher_info['fisher_diag']:
                self.fisher_info['fisher_diag'][n] = self.fisher_info['fisher_diag'][n].cuda()
    else:
        self.fisher_info = None

    print("[INFO] Process %d ready." % self.rank, flush=True)
def is_main(self):
return self.rank == 0
def all_reduce(self, tensor, **kwargs):
if self.world_size > 1:
dist.all_reduce(tensor, **kwargs)
return
def print(self, *content, flush=False):
"""
A helper function to print only on the main process
:param flush:
:param content:
:return:
"""
if self.is_main():
print(*content, flush=flush)
else:
return
def load_encoder_weight(self, checkpoint_file, wav2vec=False):
    """Initialize this model's encoder from a pretrained checkpoint.

    :param checkpoint_file: path to a torch checkpoint on disk
    :param wav2vec: if True, `checkpoint_file` holds a raw wav2vec state dict
        passed straight to `load_encoder_weights` (no model rebuild needed)
    """
    if not wav2vec:
        print("Loading pretrained Encoder Weights from %s" % checkpoint_file, flush=True)
        checkpoint = torch.load(checkpoint_file, map_location=lambda storage, loc: storage)
        # rebuild the pretrained model so encoder weights can be copied module-by-module
        pretrained_model = build_model(checkpoint['opt'], checkpoint['dicts'], False, self.constants)
        pretrained_model.load_state_dict(checkpoint['model'])

        # unwrap DDP before touching the underlying module
        model = self.model.module if self.world_size > 1 else self.model
        model.load_encoder_weights(pretrained_model)
    else:
        checkpoint = torch.load(checkpoint_file, map_location=lambda storage, loc: storage)
        model = self.model.module if self.world_size > 1 else self.model
        model.load_encoder_weights(checkpoint)

    return
def load_decoder_weight(self, checkpoint_file):
    """Initialize this model's decoder from a pretrained checkpoint.

    The pretrained and current vocabularies may differ, so the word/language
    embeddings (and generator bias rows) are copied token-by-token for tokens
    present in both vocabularies, while all other decoder weights are loaded
    with a plain state_dict.

    :param checkpoint_file: path to a torch checkpoint on disk
    """
    self.print("Loading pretrained models from %s" % checkpoint_file)
    checkpoint = torch.load(checkpoint_file, map_location=lambda storage, loc: storage)
    chkpoint_dict = checkpoint['dicts']

    pretrained_model = build_model(checkpoint['opt'], chkpoint_dict, False, self.constants)
    pretrained_model.load_state_dict(checkpoint['model'])

    self.print("Loading pretrained decoder weights ...")
    # first we have to remove the embeddings which probably have difference size ...
    pretrained_word_emb = pretrained_model.decoder.word_lut
    pretrained_model.decoder.word_lut = None

    pretrained_lang_emb = pretrained_model.decoder.language_embeddings
    pretrained_model.decoder.language_embeddings = None

    # actually we assume that two decoders have the same language embeddings...
    untrained_word_emb = self.model.decoder.word_lut
    self.model.decoder.word_lut = None

    untrained_lang_emb = self.model.decoder.language_embeddings
    self.model.decoder.language_embeddings = None

    # with the mismatched embeddings detached, the remaining decoder weights match
    decoder_state_dict = pretrained_model.decoder.state_dict()
    self.model.decoder.load_state_dict(decoder_state_dict)

    # now we load the embeddings ....
    n_copies = 0
    for token in self.dicts['tgt'].labelToIdx:

        untrained_id = self.dicts['tgt'].labelToIdx[token]

        if token in chkpoint_dict['tgt'].labelToIdx:
            pretrained_id = chkpoint_dict['tgt'].labelToIdx[token]
            untrained_word_emb.weight.data[untrained_id].copy_(pretrained_word_emb.weight.data[pretrained_id])

            # the generator bias row for this token is copied alongside the embedding
            self.model.generator[0].linear.bias.data[untrained_id].copy_(pretrained_model
                                                                         .generator[0].linear.bias.data[
                                                                             pretrained_id])
            n_copies += 1

    self.print("Copied embedding for %d words" % n_copies)
    self.model.decoder.word_lut = untrained_word_emb

    # now we load the language embeddings ...
    if pretrained_lang_emb and untrained_lang_emb and 'langs' in chkpoint_dict:
        for lang in self.dicts['langs']:

            untrained_id = self.dicts['langs'][lang]
            if lang in chkpoint_dict['langs']:
                pretrained_id = chkpoint_dict['langs'][lang]
                untrained_lang_emb.weight.data[untrained_id].copy_(pretrained_lang_emb.weight.data[pretrained_id])

    self.model.decoder.language_embeddings = untrained_lang_emb
def warm_up(self, train_data):
    """
    Warmup the memory allocator, by attempting to fit the largest batch
    :param train_data: dataset (or list of datasets) to draw the largest batch from
    :return:
    """
    # NOTE(review): the single-dataset branch uses hard-coded batch limits
    # (bsz=328, src_size=319520, tgt_size=18) while the list branch uses -1
    # (no limit) — confirm these are intentional
    batch = train_data[0].get_largest_batch(bsz=-1, src_size=-1, tgt_size=-1) \
        if is_list(train_data) \
        else train_data.get_largest_batch(bsz=328, src_size=319520, tgt_size=18)
    opt = self.opt

    if self.cuda:
        batch.cuda(fp16=False)

    self.model.train()
    self.loss_function.train()

    # touch every parameter with a zero loss so autograd materializes a grad
    # buffer for all of them
    loss = 0
    for p in self.model.parameters():
        loss = loss + p.sum() * 0

    # this will create zero grads
    loss.backward()
    # self.model.zero_grad()

    oom = False

    if opt.streaming:
        streaming_state = self.model.init_stream()
    else:
        streaming_state = None

    # try:
    with autocast(enabled=opt.fp16):
        targets = batch.get('target_output')
        tgt_mask = None
        outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
                             zero_encoder=opt.zero_encoder,
                             mirror=opt.mirror_loss, streaming_state=streaming_state,
                             nce=opt.nce, checkpointing_ffn=opt.checkpointing_ffn,
                             checkpointing_cross_attn=opt.checkpointing_cross_attn,
                             checkpointing_self_attn=opt.checkpointing_self_attn)

        outputs['tgt_mask'] = tgt_mask
        loss_dict = self.loss_function(outputs, targets, model=self.model)
        loss_data = loss_dict['data']
        loss = loss_dict['loss']  # a little trick to avoid gradient overflow with fp16
        full_loss = loss

        # the auxiliary losses are exercised too, so their memory is also warmed up
        if opt.ctc_loss > 0.0:
            ctc_loss = self.ctc_loss_function(outputs, targets)
            ctc_loss_data = ctc_loss.item()
            full_loss = full_loss + opt.ctc_loss * ctc_loss

        if opt.mirror_loss:
            rev_loss = loss_dict['rev_loss']
            mirror_loss = loss_dict['mirror_loss']
            full_loss = full_loss + rev_loss + mirror_loss

        # NOTE(review): __init__ gates the LID loss on opt.predict_language,
        # this reads opt.predict_lang — confirm both options exist
        if opt.predict_lang:
            lid_loss = loss_dict['lid']
            full_loss = full_loss + lid_loss
            lid_loss_data = lid_loss.item()
        else:
            lid_loss_data = 0

        # reconstruction loss
        if opt.reconstruct:
            rec_loss = loss_dict['rec_loss']
            rec_loss = rec_loss
            full_loss = full_loss + rec_loss

        if opt.lfv_multilingual:
            lid_logits = outputs['lid_logits']
            lid_labels = batch.get('target_lang')
            lid_loss_function = self.loss_function.get_loss_function('lid_loss')
            lid_loss = lid_loss_function(lid_logits, lid_labels)
            full_loss = full_loss + lid_loss

    optimizer = self.optim.optimizer

    # Warning: self-defined parameter list
    parameter_list = [p for p in self.model.parameters() if p.requires_grad]

    # Later if we need to do Adversarial Perturbation:
    self.grad_scaler.scale(full_loss).backward()

    loss = 0
    for p in parameter_list:
        loss += p.sum() * 0.0
    loss.backward()

    # discard all gradients produced by the warm-up pass
    for p in self.model.parameters():
        if p.grad is not None:
            p.grad.data.zero_()

    # self.model.zero_grad()
    # self.optim.zero_grad()
    # self.optim.step()
    # self.optim.reset()

    # except RuntimeError as e:
    #     if 'out of memory' in str(e):
    #         oom = True
    #     # else:
    #     print("[INFO] Warning: out-of-memory in warming up. "
    #           "This is due to the largest batch is too big for the GPU.",
    #           flush=True)
    #     raise e
    # else:
    self.print("[INFO] Warming up successfully.", flush=True)
def save(self, epoch, valid_ppl, itr=None):
    """Write a checkpoint to disk and prune the oldest saved files.

    :param epoch: (possibly fractional) epoch number encoded in the file name
    :param valid_ppl: validation metric encoded in the file name
    :param itr: optional data iterator whose state is saved for resuming
    """
    opt = self.opt
    net = self.model

    # unwrap DDP so the raw module's parameters are serialized
    if isinstance(net, torch.nn.parallel.DistributedDataParallel):
        weights = net.module.state_dict()
    else:
        weights = net.state_dict()

    # drop a checkpoint
    state = {
        'model': weights,
        'dicts': self.dicts,
        'opt': opt,
        'epoch': epoch,
        'itr': itr.state_dict() if itr else None,
        'optim': self.optim.state_dict(),
        'scaler': self.grad_scaler.state_dict()
    }

    file_name = '%s_ppl_%.6f_e%.2f.pt' % (opt.save_model, valid_ppl, epoch)
    print('Writing to %s' % file_name)
    torch.save(state, file_name)

    # keep only the newest `keep_save_files` checkpoints in the save directory
    for stale_file in checkpoint_paths(os.path.dirname(opt.save_model))[opt.keep_save_files:]:
        print(" * Deleting old save file %s ...." % stale_file)
        os.remove(stale_file)
def eval(self, data):
    """Compute cross-entropy loss and token accuracy of the model on `data`.

    Runs under no_grad/eval mode and restores train mode before returning.
    Totals are all-reduced over distributed processes.

    :param data: dataset (or list of datasets) to evaluate
    :return: (loss per target token, fraction of correctly predicted tokens)
    """
    self.print("[INFO] Running cross-entropy evaluation...", flush=True)
    opt = self.opt
    rank = self.rank
    world_size = self.world_size

    # the data iterator creates an epoch iterator
    # split_even=False: every process evaluates its full shard even if uneven
    data_iterator = generate_data_iterator(data, rank, world_size, seed=self.opt.seed,
                                           num_workers=1, epoch=1, buffer_size=opt.buffer_size, split_even=False,
                                           dataset_ids=opt.valid_sets)
    epoch_iterator = data_iterator.next_epoch_itr(False, pin_memory=False)

    data_size = len(data_iterator)
    i = 0

    self.model.eval()
    self.loss_function.eval()
    if opt.load_pretrained_classifier:
        self.classifier.eval()

    total_loss = zero_tensor()
    total_words = zero_tensor()
    total_correct = zero_tensor()

    if opt.streaming:
        streaming_state = self.model.init_stream()
    else:
        streaming_state = None

    with torch.no_grad():
        # while not data_iterator.end_of_epoch():
        while i < len(epoch_iterator):
            samples = next(epoch_iterator)

            def maybe_no_sync():
                # no gradients in eval, so DDP backward sync is never needed
                if isinstance(self.model, DDP_model):
                    return self.model.no_sync()
                else:
                    return contextlib.ExitStack()  # dummy contextmanager

            if samples:
                with maybe_no_sync():
                    with autocast(enabled=opt.fp16):
                        batch = prepare_sample(samples, device=self.device)
                        targets = batch.get('target_output')
                        tgt_mask = targets.ne(onmt.constants.PAD)

                        if opt.load_pretrained_classifier:
                            layer_states = self.classifier.encode(batch)
                        else:
                            layer_states = None

                        outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
                                             mirror=opt.mirror_loss, streaming_state=streaming_state, nce=opt.nce,
                                             pretrained_layer_states=layer_states)

                        outputs['tgt_mask'] = tgt_mask
                        loss_dict = self.loss_function(outputs, targets, model=self.model, eval=True)
                        loss_data = loss_dict['data']
                        correct, total = loss_dict['correct'], loss_dict['total']

                        # if total != batch.tgt_size:
                        #     # print(batch.get('target').size())
                        #     # print(batch.get('target_output').size())
                        #     targets = batch.get('target_output')
                        #     targets_ = targets.view(-1)
                        #     non_pad_mask = torch.nonzero(targets_.ne(self.loss_function.padding_idx)).squeeze(1)
                        #     labels = targets_.index_select(0, non_pad_mask)
                        #     print(labels, labels.numel(), batch.tgt_size)
                        # sanity check: the loss saw exactly the batch's tokens
                        assert (total == batch.tgt_size), \
                            "Process %i, Minibatch %d/%d: Expected %d tokens from the batch, got %d" \
                            % (self.rank, i, data_size, batch.tgt_size, total)

                # print(i, len(data_iterator), total, batch.tgt_size, loss_data)
                total_loss.add_(loss_data)
                total_words.add_(batch.tgt_size)
                total_correct.add_(correct)
                i = i + 1

    # allreduce the total loss and total words from other processes
    self.all_reduce(total_loss, op=dist.ReduceOp.SUM, group=self.group)
    self.all_reduce(total_words, op=dist.ReduceOp.SUM, group=self.group)
    self.all_reduce(total_correct, op=dist.ReduceOp.SUM, group=self.group)

    # restore training mode for the caller
    self.model.train()
    self.loss_function.train()
    if opt.load_pretrained_classifier:
        self.classifier.train()

    return total_loss.item() / total_words.item(), total_correct.item() / total_words.item()
def train_epoch(self, train_data, valid_data, epoch, resume=False, itr_progress=None):
    """Run one training epoch over ``train_data``.

    Handles gradient accumulation (``opt.update_frequency``), mixed precision via
    the grad scaler, optional auxiliary losses (CTC, mirror, reconstruction,
    contrastive, language-ID), optional virtual adversarial training (VAT) and
    optional EWC regularization.  Periodically evaluates on ``valid_data`` and
    checkpoints via ``self.save``.

    :param train_data: training dataset (or list of datasets)
    :param valid_data: validation dataset used for intermediate evaluation
    :param epoch: index of the current epoch (used for logging / checkpoint names)
    :param resume: if True, restore the data iterator state from ``itr_progress``
    :param itr_progress: saved iterator state used when ``resume`` is True
    :return: average training loss per target word (0-dim tensor)
    """
    opt = self.opt
    streaming = opt.streaming
    grad_norm = -1

    # Clear the gradients of the model
    self.optim.zero_grad(set_to_none=opt.true_zero_grad)

    # note: for training, split_even=True so every worker gets the same number of batches
    dataset = train_data
    data_iterator = generate_data_iterator(dataset, self.rank, self.world_size,
                                           seed=self.opt.seed, num_workers=opt.num_workers,
                                           epoch=epoch, buffer_size=opt.buffer_size, split_even=True,
                                           dataset_ids=opt.train_sets)

    # TODO: fix resume which is currently buggy
    if resume:
        data_iterator.load_state_dict(itr_progress)

    epoch_iterator = data_iterator.next_epoch_itr(not streaming, pin_memory=opt.pin_memory)

    # running totals kept as tensors so they can be all-reduced across workers
    total_tokens, total_loss, total_words = zero_tensor(), zero_tensor(), zero_tensor()
    total_non_pads = zero_tensor()
    report_loss, report_tgt_words = zero_tensor(), zero_tensor()
    report_ctc_loss = zero_tensor()
    report_ewc_loss = zero_tensor()
    report_ewc_count = 0
    report_src_words = zero_tensor()
    report_sents = zero_tensor()
    report_rec_loss, report_rev_loss, report_mirror_loss = zero_tensor(), zero_tensor(), zero_tensor()
    report_enc_lid_loss = zero_tensor()
    report_enc_lid_count = 0
    report_dec_lid_loss = zero_tensor()
    report_dec_lid_count = 0
    start = time.time()
    n_samples = len(data_iterator)

    counter = 0
    num_accumulated_words = zero_tensor()
    num_accumulated_sents = zero_tensor()
    report_contrastive_loss = zero_tensor()

    if opt.streaming:
        streaming_state = self.model.init_stream()
    else:
        streaming_state = None

    # EWC setup: collect the trainable parameters for which Fisher information
    # from a previous task is available
    ewc_importance = opt.ewc_importance
    if ewc_importance > 0:
        assert self.fisher_info is not None
        if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):
            model = self.model.module
        else:
            model = self.model

        parameters = dict()
        for n, p in model.named_parameters():
            if n in self.fisher_info['mean'] and p.requires_grad:
                parameters[n] = p

    i = data_iterator.iterations_in_epoch if not is_list(train_data) else epoch_iterator.n_yielded
    i = i * self.world_size

    while not data_iterator.end_of_epoch():
        samples = next(epoch_iterator)
        batch = prepare_sample(samples, device=self.device)
        targets = batch.get('target_output')

        if opt.streaming:
            if train_data.is_new_stream():
                streaming_state = self.model.init_stream()
        else:
            streaming_state = None

        # TODO: dealing with oom during distributed training
        oom = zero_tensor()

        counter = counter + 1
        # only synchronize gradients on the iteration where we actually update
        reduce = True if counter >= opt.update_frequency or i == (n_samples - 1) else False

        try:
            def maybe_no_sync():
                # when we don't reach the updating step, we do not need to synchronize
                # the gradients, thus disabling the backward grad sync to improve speed
                if not reduce and isinstance(self.model, DDP_model):
                    return self.model.no_sync()
                else:
                    return contextlib.ExitStack()  # dummy contextmanager

            with maybe_no_sync():
                with autocast(enabled=opt.fp16):
                    tgt_mask = targets.ne(onmt.constants.PAD)
                    if opt.load_pretrained_classifier:
                        with torch.no_grad():
                            layer_states = self.classifier.encode(batch)
                    else:
                        layer_states = None

                    outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
                                         zero_encoder=opt.zero_encoder,
                                         mirror=opt.mirror_loss, streaming_state=streaming_state,
                                         nce=opt.nce, pretrained_layer_states=layer_states,
                                         adv_ptb_grad=opt.virtual_adversarial_training_mode > 0,
                                         checkpointing_ffn=opt.checkpointing_ffn,
                                         checkpointing_cross_attn=opt.checkpointing_cross_attn,
                                         checkpointing_self_attn=opt.checkpointing_self_attn
                                         )

                    batch_size = batch.size
                    # outputs is a dictionary containing keys/values necessary for loss function
                    # can be flexibly controlled within models for easier extensibility
                    outputs['tgt_mask'] = tgt_mask

                    loss_dict = self.loss_function(outputs, targets, model=self.model)
                    loss_data = loss_dict['data']
                    loss = loss_dict['loss']  # a little trick to avoid gradient overflow with fp16
                    full_loss = loss

                    if opt.ctc_loss > 0.0:
                        ctc_loss = self.ctc_loss_function(outputs, targets)
                        ctc_loss_data = ctc_loss.item()
                        full_loss = full_loss + opt.ctc_loss * ctc_loss

                    if opt.mirror_loss:
                        rev_loss = loss_dict['rev_loss']
                        rev_loss_data = loss_dict['rev_loss_data']
                        mirror_loss = loss_dict['mirror_loss']
                        full_loss = full_loss + rev_loss + mirror_loss
                        mirror_loss_data = loss_dict['mirror_loss'].item()
                    else:
                        rev_loss_data = None
                        mirror_loss_data = 0

                    if opt.predict_language:
                        # auxiliary language-identification losses on encoder/decoder states
                        enc_pred_lang = outputs['enc_pred_lang']
                        enc_mask = outputs['src_mask']
                        enc_lid_loss = self.lid_loss_function(enc_pred_lang, batch.get("source_lang"), enc_mask)
                        dec_pred_lang = outputs['dec_pred_lang']
                        dec_mask = batch.get('target_input_selfattn_mask')
                        dec_lid_loss = self.lid_loss_function(dec_pred_lang, batch.get("target_lang"), dec_mask)
                        full_loss = full_loss + 0.01 * (enc_lid_loss + dec_lid_loss)
                        report_enc_lid_loss.add_(enc_lid_loss.item())
                        report_enc_lid_count += enc_mask.ne(1).int().sum().item()
                        report_dec_lid_loss.add_(dec_lid_loss.item())
                        report_dec_lid_count += dec_mask.ne(1).int().sum().item()
                    else:
                        enc_lid_loss = None
                        enc_lid_loss_data = None
                        dec_lid_loss = None
                        dec_lid_loss_data = None

                    # reconstruction loss
                    if opt.reconstruct:
                        rec_loss = loss_dict['rec_loss']
                        full_loss = full_loss + rec_loss
                        rec_loss_data = loss_dict['rec_loss_data']
                    else:
                        rec_loss_data = None

                    if opt.contrastive_loss_coeff > 0 and 'contrastive_loss' in outputs:
                        contrastive_loss = outputs['contrastive_loss']
                        full_loss = full_loss + opt.contrastive_loss_coeff * contrastive_loss
                        report_contrastive_loss.add_(contrastive_loss.item())

                    correct, total = loss_dict['correct'], loss_dict['total']
                    optimizer = self.optim.optimizer

                    grad_list = [p for p in self.model.parameters() if p.requires_grad]
                    if opt.virtual_adversarial_training_mode > 0:
                        # for VAT we also need the gradient w.r.t. the model input
                        model_input = outputs['source']
                        vanilla_logits = outputs['logprobs']
                        grad_list += [model_input]
                    else:
                        model_input = None
                        vanilla_logits = None

                # grad scaler has to be done outside of the autocast
                self.grad_scaler.scale(full_loss).backward()

                if opt.virtual_adversarial_training_mode > 0:
                    # run the forward pass one more time; the perturbation is the
                    # gradient of the loss w.r.t. the input
                    perturb = model_input.grad.data.new(*model_input.size()).copy_(model_input.grad.data)
                    with autocast(enabled=opt.fp16):
                        assert model_input.grad is not None
                        outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
                                             pretrained_layer_states=layer_states,
                                             input_ptb=perturb)
                        full_loss = None
                        # modes 2/3: minimise the training loss given the noisy inputs
                        if opt.virtual_adversarial_training_mode in [2, 3]:
                            loss_dict = self.loss_function(outputs, targets, model=self.model)
                            full_loss = loss_dict['loss']
                        # modes 1/3: minimise KL between outputs with and without noise
                        if opt.virtual_adversarial_training_mode in [1, 3]:
                            logits = outputs['logprobs']
                            with torch.no_grad():
                                vanilla_probs = \
                                    F.softmax(vanilla_logits.float().view(-1, vanilla_logits.size(-1)), dim=-1)
                                vanilla_probs.detach_()
                            # BUGFIX: was `logits.view(-1, logits.view(-1, logits.size(-1)))`
                            # (a tensor passed as a dimension -> TypeError).  Also,
                            # F.kl_div expects log-probabilities as its input argument,
                            # so use log_softmax here, not softmax.
                            noisy_log_probs = F.log_softmax(logits.float().view(-1, logits.size(-1)), dim=-1)
                            # Note: with kl_div we don't backward w.r.t the vanilla probs
                            kl_div_loss = F.kl_div(noisy_log_probs, vanilla_probs, reduction='sum')
                            if full_loss is None:
                                full_loss = kl_div_loss
                            else:
                                full_loss += kl_div_loss

                        # Now we only take gradients for the weights of the network
                        grad_list = [p for p in self.model.parameters() if p.requires_grad]
                        self.grad_scaler.scale(full_loss).backward()

                    del outputs

                # decay the EWC importance over time
                if self.optim._step % opt.ewc_decay_every == 0:
                    ewc_importance = ewc_importance / opt.ewc_decay_scale

        except RuntimeError as e:
            if 'out of memory' in str(e):
                print('[WARNING]: ran out of memory on GPU %d' % self.rank, flush=True)
                print('Input size at OOM position:', batch.get('source').size(),
                      batch.get('target').size())
            # recovering mechanism doesn't work at the moment, so always re-raise
            raise e

        batch_size = batch.size
        src_size = batch.src_size
        tgt_size = batch.tgt_size

        num_accumulated_words.add_(tgt_size)
        num_accumulated_sents.add_(batch_size)

        # We only update the parameters after getting gradients from n mini-batches
        update_flag = reduce
        if update_flag:
            # accumulated gradient case, in this case the update frequency
            self.all_reduce(num_accumulated_words, op=dist.ReduceOp.SUM, group=self.group)
            grad_denom = 1.0
            self.grad_scaler.unscale_(self.optim.optimizer)
            if self.opt.normalize_gradient:
                grad_denom = num_accumulated_words.item() * grad_denom
            else:
                grad_denom = 1
            # gradients were accumulated unnormalized; rescale once before the step
            if grad_denom != 1:
                normalize_gradients(self.model.parameters(), grad_denom)
            # Update the parameters
            grad_norm = clip_grad_norm(self.model.parameters(), self.opt.max_grad_norm)

            if ewc_importance > 0:
                ewc_penalty = 0
                if self.optim._step >= opt.ewc_delay:
                    # weights / mean / fisher_diag are identical on every worker,
                    # so the EWC gradients need no cross-worker synchronization
                    with self.model.no_sync():
                        for n, p in self.model.named_parameters():
                            if isinstance(self.model, DDP_model):
                                n = n[len("module."):]
                            if n in self.fisher_info['mean']:
                                penalty = self.fisher_info['fisher_diag'][n] * \
                                          torch.square(p - self.fisher_info['mean'][n].data)
                                ewc_penalty = ewc_penalty + penalty.sum()
                        loss = ewc_penalty * ewc_importance
                        ewc_loss = ewc_penalty.item()
                        # accumulate the gradients from the EWC loss
                        loss.backward()
                    report_ewc_loss.add_(ewc_loss)
                    report_ewc_count += 1

            self.optim.step(scaler=self.grad_scaler)
            self.grad_scaler.update()
            self.optim.zero_grad(set_to_none=opt.true_zero_grad)
            counter = 0
            num_accumulated_words.zero_()
            num_accumulated_sents.zero_()

            num_updates = self.optim._step
            # checkpoint on the last update of every save_every-sized window
            # (the `== -1 % opt.save_every` trick) or when max_step is reached
            if (opt.save_every > 0 and num_updates % opt.save_every == -1 % opt.save_every) \
                    or (num_updates >= opt.max_step):
                valid_loss, valid_accuracy = self.eval(valid_data)
                valid_ppl = math.exp(min(valid_loss, 100))

                if self.is_main():
                    print('Validation perplexity: %g' % valid_ppl)
                    print('Validation accuracy: %g percent' % (100 * valid_accuracy))
                    ep = float(epoch) - 1. + ((float(i) + 1.) / n_samples)
                    self.save(ep, valid_ppl if opt.save_metrics in ['ppl', 'perplexity'] else 1 - valid_accuracy,
                              itr=data_iterator)

                if num_updates >= opt.max_step:
                    print('[INFO] Max-training-step reached.')
                    exit(0)

        num_words = tgt_size
        report_loss.add_(loss_data)
        report_tgt_words.add_(num_words)
        report_src_words.add_(src_size)
        total_loss.add_(loss_data)
        total_words.add_(num_words)
        report_sents.add_(1)

        if opt.reconstruct:
            report_rec_loss.add_(rec_loss_data)
        if opt.mirror_loss:
            report_rev_loss.add_(rev_loss_data)
            report_mirror_loss.add_(mirror_loss_data)
        if opt.ctc_loss > 0.0:
            report_ctc_loss.add_(ctc_loss_data)

        # control the index a little bit to ensure the log is always printed
        if i == 0 or ((i + 1) % opt.log_interval < self.world_size):
            self.all_reduce(report_loss, op=dist.ReduceOp.SUM, group=self.group)
            self.all_reduce(report_tgt_words, op=dist.ReduceOp.SUM, group=self.group)
            self.all_reduce(report_src_words, op=dist.ReduceOp.SUM, group=self.group)

            if self.is_main():
                log_string = ("Epoch %2d, %5d/%5d; ; ppl: %6.2f ; grad_norm: %6.4f " %
                              (epoch, i + 1, len(data_iterator),
                               math.exp(report_loss.item() / report_tgt_words.item()),
                               grad_norm))

                if opt.mirror_loss:
                    self.all_reduce(report_rev_loss, op=dist.ReduceOp.SUM, group=self.group)
                    rev_ppl = math.exp(report_rev_loss.item() / report_tgt_words.item())
                    log_string += (" rev_ppl: %6.2f ; " % rev_ppl)
                    # BUGFIX: use .item() instead of formatting raw tensors with %f,
                    # consistent with every other report line in this method
                    log_string += (" mir_loss: %6.2f ; " % (report_mirror_loss.item() / report_tgt_words.item()))

                if opt.ctc_loss > 0.0:
                    ctc_loss = report_ctc_loss.item() / report_tgt_words.item()
                    log_string += (" ctcloss: %8.2f ; " % ctc_loss)

                if opt.contrastive_loss_coeff > 0.0:
                    ctv_loss = report_contrastive_loss.item() / report_tgt_words.item()
                    log_string += (" ctv_loss: %8.2f ; " % ctv_loss)

                if ewc_importance > 0.0:
                    try:
                        _ewc_loss = report_ewc_loss.item() / report_ewc_count
                    except ZeroDivisionError:
                        _ewc_loss = float('nan')
                    log_string += (" ewcloss: %8.8f ; " % _ewc_loss)

                if opt.predict_language:
                    try:
                        _enc_lid_loss = report_enc_lid_loss.item() / report_enc_lid_count
                        _dec_lid_loss = report_dec_lid_loss.item() / report_dec_lid_count
                    except ZeroDivisionError:
                        _enc_lid_loss = float('nan')
                        _dec_lid_loss = float('nan')
                    log_string += (" enc_lidloss: %8.8f ; " % _enc_lid_loss)
                    log_string += (" dec_lidloss: %8.8f ; " % _dec_lid_loss)

                log_string += ("lr: %.7f ; updates: %7d; " %
                               (self.optim.get_learning_rate(),
                                self.optim._step))
                log_string += ("%5.0f src tok/s; %5.0f tgt tok/s; " %
                               (report_src_words.item() / (time.time() - start),
                                report_tgt_words.item() / (time.time() - start)))
                log_string += ("%s elapsed" %
                               str(datetime.timedelta(seconds=int(time.time() - self.start_time))))
                self.print(log_string, flush=True)

            report_loss.zero_()
            report_tgt_words.zero_()
            report_src_words.zero_()
            report_rec_loss.zero_()
            report_rev_loss.zero_()
            report_mirror_loss.zero_()
            report_ctc_loss.zero_()
            report_ewc_loss.zero_()
            report_ewc_count = 0
            if report_contrastive_loss is not None:
                report_contrastive_loss.zero_()
            start = time.time()

        # increase i by world size
        i = i + self.world_size

    return total_loss / total_words
def estimate_fisher(self, data):
    """
    Estimate the diagonal of the Fisher information matrix on a dataset.

    Runs forward/backward passes over ``data`` without taking optimizer steps,
    accumulates the squared gradients per parameter (the diagonal Fisher
    approximation used by EWC), synchronizes the estimates across workers, and on
    rank 0 saves parameter means plus Fisher diagonals to ``opt.load_from + ".fisher"``.

    :param data: train or dev data
    :return: average loss per target word over the dataset (0-dim tensor)
    """

    def is_factorize_params(p_name):
        # Factorized (rank/scale) parameters and adapters are excluded from the
        # Fisher estimate: they are task-specific and not regularized by EWC.
        # Same truth table as the original chain of endswith checks, with the
        # duplicated suffixes (.r_o/.s_o etc.) collapsed into one tuple call.
        suffixes = (".r_i", ".s_i", ".r_o", ".s_o", ".r_p", ".s_p",
                    ".r_q", ".s_q", ".r_kv", ".s_kv",
                    ".rm_i", ".sm_i", ".rm_o", ".sm_o", ".rm_p", ".sm_p",
                    ".rm_q", ".sm_q", ".rm_kv", ".sm_kv",
                    ".sub_r_i", ".sub_s_i", ".sub_r_o", ".sub_s_o", ".sub_r_p", ".sub_s_p",
                    ".sub_r_q", ".sub_s_q", ".sub_r_kv", ".sub_s_kv",
                    ".sub_rm_i", ".sub_sm_i", ".sub_rm_o", ".sub_sm_o", ".sub_rm_p", ".sub_sm_p",
                    ".sub_rm_q", ".sub_sm_q", ".sub_rm_kv", ".sub_sm_kv")
        return p_name.endswith(suffixes) or "adapter" in p_name

    if self.rank == 0:
        print("[INFO] Estimating fisher information ...\n")

    opt = self.opt
    epoch = 0
    assert len(opt.load_from) > 0

    # Clear the gradients of the model
    self.optim.zero_grad(set_to_none=False)

    if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):
        model = self.model.module
    else:
        model = self.model

    parameters = {n: p for n, p in model.named_parameters() if p.requires_grad}
    precision_matrices = dict()
    for n, p in parameters.items():
        if not is_factorize_params(n):
            precision_matrices[n] = torch.zeros_like(p)

    # note: for Training split_even=True
    dataset = data
    data_iterator = generate_data_iterator(dataset, self.rank, self.world_size,
                                           seed=self.opt.seed, num_workers=opt.num_workers,
                                           epoch=0, buffer_size=opt.buffer_size, split_even=True,
                                           dataset_ids=opt.train_sets)
    streaming = False
    epoch_iterator = data_iterator.next_epoch_itr(not streaming, pin_memory=opt.pin_memory)

    total_tokens, total_loss, total_words = zero_tensor(), zero_tensor(), zero_tensor()
    total_non_pads = zero_tensor()
    report_loss, report_tgt_words = zero_tensor(), zero_tensor()
    report_ctc_loss = zero_tensor()
    report_src_words = zero_tensor()
    report_rec_loss, report_rev_loss, report_mirror_loss = zero_tensor(), zero_tensor(), zero_tensor()
    start = time.time()
    n_samples = len(data_iterator)

    counter = 0
    num_accumulated_words = zero_tensor()
    num_accumulated_sents = zero_tensor()
    report_contrastive_loss = zero_tensor()

    if opt.streaming:
        streaming_state = self.model.init_stream()
    else:
        streaming_state = None

    i = data_iterator.iterations_in_epoch if not is_list(dataset) else epoch_iterator.n_yielded
    i = i * self.world_size  # incorrect?

    # NOTE(review): train() keeps dropout ACTIVE; if a noise-free Fisher estimate
    # is intended this should be eval() -- TODO confirm original intent
    self.model.train()

    while not data_iterator.end_of_epoch():
        samples = next(epoch_iterator)
        batch = prepare_sample(samples, device=self.device)
        targets = batch.get('target_output')

        if opt.streaming:
            # BUGFIX: was `train_data.is_new_stream()`; `train_data` is not defined
            # in this method (NameError) -- the dataset argument here is `data`
            if data.is_new_stream():
                streaming_state = self.model.init_stream()
        else:
            streaming_state = None

        # TODO: dealing with oom during distributed training
        oom = zero_tensor()
        counter = counter + 1
        reduce = False  # never reduce: gradients stay local until the end

        try:
            def maybe_no_sync():
                # we never reach an updating step here, so always disable DDP's
                # backward grad sync to improve speed
                if not reduce and isinstance(self.model, DDP_model):
                    return self.model.no_sync()
                else:
                    return contextlib.ExitStack()  # dummy contextmanager

            with maybe_no_sync():
                with autocast(enabled=opt.fp16):
                    tgt_mask = targets.ne(onmt.constants.PAD)
                    if opt.load_pretrained_classifier:
                        with torch.no_grad():
                            layer_states = self.classifier.encode(batch)
                    else:
                        layer_states = None

                    outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
                                         zero_encoder=opt.zero_encoder,
                                         mirror=opt.mirror_loss, streaming_state=streaming_state,
                                         nce=opt.nce, pretrained_layer_states=layer_states,
                                         adv_ptb_grad=opt.virtual_adversarial_training_mode > 0,
                                         checkpointing_ffn=opt.checkpointing_ffn,
                                         checkpointing_cross_attn=opt.checkpointing_cross_attn,
                                         checkpointing_self_attn=opt.checkpointing_self_attn
                                         )

                    batch_size = batch.size
                    # outputs is a dictionary containing keys/values necessary for loss function
                    outputs['tgt_mask'] = tgt_mask

                    loss_dict = self.loss_function(outputs, targets, model=self.model)
                    loss_data = loss_dict['data']
                    loss = loss_dict['loss']  # a little trick to avoid gradient overflow with fp16
                    full_loss = loss

                    if opt.ctc_loss > 0.0:
                        ctc_loss = self.ctc_loss_function(outputs, targets)
                        ctc_loss_data = ctc_loss.item()
                        full_loss = full_loss + opt.ctc_loss * ctc_loss

                    if opt.mirror_loss:
                        rev_loss = loss_dict['rev_loss']
                        rev_loss_data = loss_dict['rev_loss_data']
                        mirror_loss = loss_dict['mirror_loss']
                        full_loss = full_loss + rev_loss + mirror_loss
                        mirror_loss_data = loss_dict['mirror_loss'].item()
                    else:
                        rev_loss_data = None
                        mirror_loss_data = 0

                    # reconstruction loss
                    if opt.reconstruct:
                        rec_loss = loss_dict['rec_loss']
                        full_loss = full_loss + rec_loss
                        rec_loss_data = loss_dict['rec_loss_data']
                    else:
                        rec_loss_data = None

                    if opt.contrastive_loss_coeff > 0 and 'contrastive_loss' in outputs:
                        contrastive_loss = outputs['contrastive_loss']
                        full_loss = full_loss + opt.contrastive_loss_coeff * contrastive_loss
                        report_contrastive_loss.add_(contrastive_loss.item())

                    correct, total = loss_dict['correct'], loss_dict['total']
                    optimizer = self.optim.optimizer

                    grad_list = [p for p in self.model.parameters() if p.requires_grad]
                    if opt.virtual_adversarial_training_mode > 0:
                        # if we use virtual adversarial training: add the input to the grad list
                        model_input = outputs['source']
                        vanilla_logits = outputs['logprobs']
                        grad_list += [model_input]
                    else:
                        model_input = None
                        vanilla_logits = None

                # grad scaler has to be done outside of the autocast
                self.grad_scaler.scale(full_loss).backward()

        except RuntimeError as e:
            if 'out of memory' in str(e):
                print('[WARNING]: ran out of memory on GPU %d' % self.rank, flush=True)
                print('Input size at OOM position:', batch.get('source').size(),
                      batch.get('target').size())
            # always raise the error
            raise e

        batch_size = batch.size
        src_size = batch.src_size
        tgt_size = batch.tgt_size
        num_accumulated_words.add_(tgt_size)
        num_accumulated_sents.add_(batch_size)

        # unscale the gradient first
        self.grad_scaler.unscale_(self.optim.optimizer)
        # fake update with max_norm=0 -- no weight change (we need a learning rate = 0 for this)
        grad_norm = clip_grad_norm(self.model.parameters(), 0)
        self.grad_scaler.update()

        # Accumulate squared gradients into the precision (Fisher diagonal) matrices
        for n, p in parameters.items():
            if n in precision_matrices:
                grad = p.grad.data
                # drop inf/nan entries produced by fp16 overflow before squaring
                grad.masked_fill_(torch.logical_or(torch.isinf(grad), torch.isnan(grad)), 0)
                precision_matrices[n].add_(torch.square(p.grad.data))

        self.optim.zero_grad(set_to_none=opt.true_zero_grad)
        counter = 0

        num_words = tgt_size
        report_loss.add_(loss_data)
        report_tgt_words.add_(num_words)
        report_src_words.add_(src_size)
        total_loss.add_(loss_data)
        total_words.add_(num_words)

        # control the index a little bit to ensure the log is always printed
        if i == 0 or ((i + 1) % opt.log_interval < self.world_size):
            self.all_reduce(report_loss, op=dist.ReduceOp.SUM, group=self.group)
            self.all_reduce(report_tgt_words, op=dist.ReduceOp.SUM, group=self.group)
            self.all_reduce(report_src_words, op=dist.ReduceOp.SUM, group=self.group)
            self.all_reduce(report_contrastive_loss, op=dist.ReduceOp.SUM, group=self.group)

            if self.is_main():
                log_string = ("Epoch %2d, %5d/%5d; ; ppl: %6.2f ; grad_norm: %6.4f; gradscaler: %9.9f " %
                              (epoch, i + 1, len(data_iterator),
                               math.exp(report_loss.item() / report_tgt_words.item()),
                               grad_norm,
                               self.grad_scaler.get_scale()))
                log_string += ("lr: %.7f ; updates: %7d; " %
                               (self.optim.get_learning_rate(),
                                self.optim._step))
                log_string += ("%5.0f src tok/s; %5.0f tgt tok/s; " %
                               (report_src_words.item() / (time.time() - start),
                                report_tgt_words.item() / (time.time() - start)))
                log_string += ("%s elapsed" %
                               str(datetime.timedelta(seconds=int(time.time() - self.start_time))))
                self.print(log_string, flush=True)

            report_loss.zero_()
            report_tgt_words.zero_()
            report_src_words.zero_()
            report_rec_loss.zero_()
            report_rev_loss.zero_()
            report_mirror_loss.zero_()
            report_ctc_loss.zero_()
            if report_contrastive_loss is not None:
                report_contrastive_loss.zero_()
            start = time.time()

        # increase i by world size
        i = i + self.world_size

    if isinstance(self.model, DDP_model):
        torch.cuda.synchronize(device=self.rank)
        # force DDP to synchronize one last time via a zero loss (-> zero grads)
        loss = 0
        for n, p in parameters.items():
            loss = loss + p.sum() * 0
        loss.backward()

    self.all_reduce(num_accumulated_words, op=dist.ReduceOp.SUM, group=self.group)

    if self.world_size > 1:
        if self.rank == 0:
            print("[INFO] Synchronizing precision matrices")
        for n in precision_matrices:
            self.all_reduce(precision_matrices[n], op=dist.ReduceOp.SUM, group=self.group)
        if self.rank == 0:
            print("Done...")

    if self.rank == 0:
        # Accumulate fisher info from previous iteration
        if self.fisher_info is not None:
            print("[INFO] Accumulating fisher information from a previous iteration...")
            for n in precision_matrices:
                # BUGFIX: membership must be checked against the stored
                # 'fisher_diag' dict; the old `n in self.fisher_info` only saw the
                # top-level keys ('mean'/'fisher_diag'/'opt') and never matched,
                # so previous Fisher estimates were silently discarded
                if n in self.fisher_info['fisher_diag']:
                    precision_matrices[n] = self.fisher_info['fisher_diag'][n] + precision_matrices[n]

        means = dict()
        for n, p in parameters.items():
            if n in precision_matrices:
                means[n] = p

        checkpoint = {
            'mean': means,
            'fisher_diag': precision_matrices,
            'opt': opt
        }

        file_name = opt.load_from + ".fisher"
        print("[INFO] Saving means and fisher information to %s" % file_name)
        torch.save(checkpoint, file_name)

    return total_loss / total_words
def run(self, train_data=None, valid_data=None, checkpoint=None):
    """Top-level training driver.

    Optionally restores iterator/epoch state from ``checkpoint``, loads
    pretrained encoder/decoder weights, and either estimates Fisher
    information, runs a validation-only pass, or trains for ``opt.epochs``
    epochs with validation and checkpointing after each one.
    """
    opt = self.opt

    # Defaults: fresh start from epoch 1 with no saved iterator state.
    itr_progress = None
    resume = False
    start_epoch = 1

    if checkpoint is not None:
        # TODO: have loading checkpoints for each process
        prec_opt = checkpoint['opt'] if 'opt' in checkpoint else None
        if not opt.reset_optim:
            resume = True
            if 'epoch' in checkpoint:
                start_epoch = math.floor(checkpoint['epoch']) + 1
        del checkpoint  # free the (potentially large) state dict early

    if opt.load_encoder_from:
        self.load_encoder_weight(opt.load_encoder_from)
    if opt.load_decoder_from:
        self.load_decoder_weight(opt.load_decoder_from)

    # Fisher-estimation mode: no training loop at all.
    if opt.estimate_fisher_information:
        self.start_time = time.time()
        self.estimate_fisher(train_data)
        return

    # Optional validation pass before (or instead of) training.
    if opt.run_validation_before_training or opt.max_step <= 0:
        valid_loss, valid_accuracy = self.eval(valid_data)
        valid_ppl = math.exp(min(valid_loss, 100))
        if self.is_main():
            print('[INFO] Validation perplexity: %g' % valid_ppl, flush=True)
            # percent is never used in plural :)
            print('[INFO] Validation accuracy: %g percent' % (100 * valid_accuracy))
        if opt.max_step <= 0:
            # validate-and-save only, then stop
            if self.is_main():
                self.save(0, valid_ppl if opt.save_metrics in ['ppl', 'perplexity'] else 1 - valid_accuracy)
            return

    self.start_time = time.time()

    for epoch in range(start_epoch, start_epoch + opt.epochs):
        self.print('')

        # (1) train for one epoch on the training set
        train_loss = self.train_epoch(train_data, valid_data, epoch,
                                      resume=resume, itr_progress=itr_progress)
        train_ppl = math.exp(min(train_loss, 100))
        self.print('[INFO] Train perplexity: %g' % train_ppl)

        # (2) evaluate on the validation set
        valid_loss, valid_accuracy = self.eval(valid_data)
        valid_ppl = math.exp(min(valid_loss, 100))
        if self.is_main():
            print('[INFO] Validation perplexity: %g' % valid_ppl)
            print('[INFO] Validation accuracy: %g percent' % (100 * valid_accuracy))
            self.save(epoch, valid_ppl if opt.save_metrics in ['ppl', 'perplexity'] else 1 - valid_accuracy)

        # resume only applies to the very first epoch of this run
        itr_progress = None
        resume = False
| 68,654 | 42.452532 | 121 | py |
NMTGMinor | NMTGMinor-master/onmt/inference/perplexity_scorer.py | import onmt
import onmt.modules
import torch.nn as nn
import torch
import math
from torch.autograd import Variable
from onmt.model_factory import build_model
import torch.nn.functional as F
from onmt.inference.search import BeamSearch, DiverseBeamSearch
from onmt.inference.translator import Translator
# Model architectures this module is known to work with.
model_list = ['transformer', 'stochastic_transformer']
class PerplexityScorer(Translator):
    """Scores gold target sequences with the model (perplexity scoring).

    Reuses the fast beam-search translator machinery (Fairseq-style) but only
    ever force-decodes the provided references instead of searching.
    """

    def __init__(self, opt):
        super().__init__(opt)

        self.search = BeamSearch(self.tgt_dict)
        self.eos = onmt.constants.EOS
        self.pad = onmt.constants.PAD
        self.bos = self.bos_id
        self.vocab_size = self.tgt_dict.size()
        self.min_len = 1
        self.normalize_scores = opt.normalize
        self.len_penalty = opt.alpha

        # Optional attributes: fall back to defaults for older option sets.
        self.no_repeat_ngram_size = getattr(opt, 'no_repeat_ngram_size', 0)
        self.dynamic_max_len = getattr(opt, 'dynamic_max_len', False)
        self.dynamic_max_len_scale = getattr(opt, 'dynamic_max_len_scale', 1.2)

        if opt.verbose:
            print('* Current bos id: %d' % self.bos_id, onmt.constants.BOS)
            print('* Using fast beam search implementation')

    def scoreBatch(self, batch):
        """Score one batch without building an autograd graph."""
        with torch.no_grad():
            return self._scoreBatch(batch)

    def _scoreBatch(self, batch):
        """Force-decode the batch's target side and return its gold scores.

        Returns ``(gold_scores, gold_words, allgold_scores)``; when the batch
        has no target side the defaults are a zero tensor, 0 and [].
        """
        beam_size = self.opt.beam_size
        bsz = batch_size = batch.size
        max_len = self.opt.max_sent_length

        # defaults used when the batch carries no target side
        gold_scores = batch.get('source').data.new(batch_size).float().zero_()
        gold_words = 0
        allgold_scores = []

        if batch.has_target:
            # Use the first model of the ensemble to decode
            scorer = self.models[0]
            gold_words, gold_scores, allgold_scores = scorer.decode(batch)

        return gold_scores, gold_words, allgold_scores

    def _decode(self, tokens, decoder_states):
        """Advance one decoding step for every model and combine the outputs.

        Returns ``(log_probs, attn)``; note attn is deliberately None since
        callers never consume the combined attention.
        """
        outs, attns = dict(), dict()

        for idx in range(self.n_models):
            step_out = self.models[idx].step(tokens, decoder_states[idx])
            outs[idx] = step_out['log_prob']       # batch*beam x vocab_size
            attns[idx] = step_out['coverage']

        out = self._combine_outputs(outs)
        attn = self._combine_attention(attns)
        attn = None  # the combined attention is never actually used
        return out, attn

    def translate(self, src_data, tgt_data, type='mt'):
        """Build a batch from raw src/tgt data and return its gold scores."""
        # (1) convert words to indexes
        dataset = self.build_data(src_data, tgt_data, type=type)
        batch = dataset.next()[0]
        if self.cuda:
            batch.cuda(fp16=self.fp16)
        batch_size = batch.size

        # (2) score the gold targets
        gold_score, gold_words, allgold_words = self.scoreBatch(batch)
        return gold_score, gold_words, allgold_words
| 3,625 | 30.258621 | 80 | py |
NMTGMinor | NMTGMinor-master/onmt/inference/stream_translator.py | import onmt
import onmt.modules
import torch.nn as nn
import torch
import math
from onmt.model_factory import build_model
import torch.nn.functional as F
from onmt.inference.search import BeamSearch, DiverseBeamSearch
from onmt.inference.translator import Translator
from collections import defaultdict
class StreamTranslator(Translator):
"""
A fast implementation of the Beam Search based translator
Based on Fairseq implementation
"""
def __init__(self, opt):
    """Build a streaming beam-search translator on top of ``Translator``.

    Reads decoding hyper-parameters from ``opt`` (using defaults for
    the optional attributes) and caps every model's memory window to
    ``opt.max_memory_size``.
    """
    super().__init__(opt)
    self.search = BeamSearch(self.tgt_dict)
    # Special-token ids used throughout the search.
    self.eos = onmt.constants.EOS
    self.pad = onmt.constants.PAD
    self.bos = self.bos_id
    self.vocab_size = self.tgt_dict.size()
    self.min_len = 1
    self.normalize_scores = opt.normalize
    self.len_penalty = opt.alpha
    # One incremental decoder state per model, created lazily.
    self.decoder_states = defaultdict(lambda: None)
    # Optional options: fall back to defaults when absent on opt.
    self.no_repeat_ngram_size = getattr(opt, 'no_repeat_ngram_size', 0)
    self.dynamic_max_len = getattr(opt, 'dynamic_max_len', False)
    self.dynamic_max_len_scale = getattr(opt, 'dynamic_max_len_scale', 1.2)
    self.dynamic_min_len_scale = getattr(opt, 'dynamic_min_len_scale', 0.8)
    if opt.verbose:
        print('* Current bos id: %d' % self.bos_id, onmt.constants.BOS)
        print('* Using fast beam search implementation')
    self.max_memory_size = opt.max_memory_size
    for model in self.models:
        model.set_memory_size(self.max_memory_size, self.max_memory_size)
def reset_stream(self):
    """Drop all cached incremental decoder states, starting a fresh stream."""
    self.decoder_states = defaultdict(lambda: None)
def translateBatch(self, batch):
    """Translate one batch with autograd disabled; see ``_translateBatch``."""
    with torch.no_grad():
        result = self._translateBatch(batch)
    return result
def _translateBatch(self, batch):
    """Streaming beam search over one batch (fairseq-style generator).

    Returns ``(finalized, gold_scores, gold_words, allgold_scores)`` where
    ``finalized[b]`` is a score-sorted list of up to ``beam_size`` hypothesis
    dicts (``tokens`` / ``score`` / ``attention`` / ``alignment`` /
    ``positional_scores``) for sentence ``b``.  Decoder states are kept in
    ``self.decoder_states`` across calls so successive segments continue
    the same stream.
    """
    # Batch size is in different location depending on data.
    beam_size = self.opt.beam_size
    bsz = batch_size = batch.size
    max_len = self.opt.max_sent_length
    gold_scores = batch.get('source').data.new(batch_size).float().zero_()
    gold_words = 0
    allgold_scores = []
    if batch.has_target:
        # Use the first model to (force-)decode the gold target.
        model_ = self.models[0]
        gold_words, gold_scores, allgold_scores = model_.decode(batch)
    # (3) Start decoding
    # initialize buffers
    src = batch.get('source')
    scores = src.new(bsz * beam_size, max_len + 1).float().fill_(0)
    scores_buf = scores.clone()
    tokens = src.new(bsz * beam_size, max_len + 2).long().fill_(self.pad)
    tokens_buf = tokens.clone()
    tokens[:, 0].fill_(self.bos)  # first token is bos
    attn, attn_buf = None, None
    nonpad_idxs = None
    src_tokens = src.transpose(0, 1)  # batch x time
    src_lengths = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
    blacklist = src_tokens.new_zeros(bsz, beam_size).eq(-1)  # forward and backward-compatible False mask
    prefix_tokens = None
    # list of completed sentences
    finalized = [[] for i in range(bsz)]
    finished = [False for i in range(bsz)]
    num_remaining_sent = bsz
    # number of candidate hypos per step
    cand_size = 2 * beam_size  # 2 x beam size in case half are EOS
    # offset arrays for converting between different indexing schemes
    bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
    cand_offsets = torch.arange(0, cand_size).type_as(tokens)
    # helper function for allocating reusable buffers on the fly
    buffers = {}

    def buffer(name, type_of=tokens):  # noqa
        if name not in buffers:
            buffers[name] = type_of.new()
        return buffers[name]

    def is_finished(sent, step, unfinalized_scores=None):
        """
        Check whether we've finished generation for a given sentence, by
        comparing the worst score among finalized hypotheses to the best
        possible score among unfinalized hypotheses.
        """
        # Here a sentence is done as soon as beam_size hypotheses ended in EOS.
        assert len(finalized[sent]) <= beam_size
        if len(finalized[sent]) == beam_size:
            return True
        return False

    def finalize_hypos(step, bbsz_idx, eos_scores):
        """
        Finalize the given hypotheses at this step, while keeping the total
        number of finalized hypotheses per sentence <= beam_size.

        Note: the input must be in the desired finalization order, so that
        hypotheses that appear earlier in the input are preferred to those
        that appear later.

        Args:
            step: current time step
            bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
                indicating which hypotheses to finalize
            eos_scores: A vector of the same size as bbsz_idx containing
                scores for each hypothesis
        """
        assert bbsz_idx.numel() == eos_scores.numel()
        # clone relevant token and attention tensors
        tokens_clone = tokens.index_select(0, bbsz_idx)
        tokens_clone = tokens_clone[:, 1:step + 2]  # skip the first index, which is BOS
        assert not tokens_clone.eq(self.eos).any()
        tokens_clone[:, step] = self.eos
        attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step + 2] if attn is not None else None
        # compute scores per token position
        pos_scores = scores.index_select(0, bbsz_idx)[:, :step + 1]
        pos_scores[:, step] = eos_scores
        # convert from cumulative to per-position scores
        pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
        # normalize sentence-level scores (length penalty), in place
        if self.normalize_scores:
            eos_scores /= (step + 1) ** self.len_penalty
        # cum_unfin[i]: how many already-finished sentences precede the i-th
        # still-unfinished one, to map unfin_idx back to the original index
        cum_unfin = []
        prev = 0
        for f in finished:
            if f:
                prev += 1
            else:
                cum_unfin.append(prev)
        sents_seen = set()
        for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), eos_scores.tolist())):
            unfin_idx = idx // beam_size
            sent = unfin_idx + cum_unfin[unfin_idx]
            sents_seen.add((sent, unfin_idx))

            def get_hypo():
                if attn_clone is not None:
                    # remove padding tokens from attn scores
                    hypo_attn = attn_clone[i]
                else:
                    hypo_attn = None
                return {
                    'tokens': tokens_clone[i],
                    'score': score,
                    'attention': hypo_attn,  # src_len x tgt_len
                    'alignment': None,
                    'positional_scores': pos_scores[i],
                }

            if len(finalized[sent]) < beam_size:
                finalized[sent].append(get_hypo())
        newly_finished = []
        for sent, unfin_idx in sents_seen:
            # check termination conditions for this sentence
            # NOTE(review): unfin_idx is passed into the (unused)
            # `unfinalized_scores` parameter of is_finished — harmless,
            # but confirm it was not meant to be a score tensor.
            if not finished[sent] and is_finished(sent, step, unfin_idx):
                finished[sent] = True
                newly_finished.append(unfin_idx)
        return newly_finished

    reorder_state = None
    batch_idxs = None
    # initialize the decoder state, including:
    # - expanding the context over the batch dimension len_src x (B*beam) x H
    # - expanding the mask over the batch dimension (B*beam) x len_src
    # previous_decoding_state carries the stream context from earlier calls.
    for i in range(self.n_models):
        self.decoder_states[i] = self.models[i].create_decoder_state(batch, beam_size,
                                                                     previous_decoding_state=self.decoder_states[i],
                                                                     streaming=True)
    # Optionally derive min/max target lengths from the source length.
    if self.dynamic_max_len:
        src_len = src.size(0)
        max_len = min(math.ceil(int(src_len) * self.dynamic_max_len_scale), self.opt.max_sent_length)
        min_len = math.ceil(int(src_len) * self.dynamic_min_len_scale)
    else:
        min_len = self.min_len
    # Start decoding
    for step in range(max_len + 1):  # one extra step for EOS marker
        # reorder decoder internal states based on the prev choice of beams
        if reorder_state is not None:
            if batch_idxs is not None:
                # update beam indices to take into account removed sentences
                corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs)
                reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)
            for i, model in enumerate(self.models):
                self.decoder_states[i]._reorder_incremental_state(reorder_state)
        decode_input = tokens[:, :step + 1]
        lprobs, avg_attn_scores = self._decode(decode_input, self.decoder_states)
        # attention from _decode is discarded immediately, so the attn
        # bookkeeping below is dead in practice (attn stays None).
        avg_attn_scores = None
        lprobs[:, self.pad] = -math.inf  # never select pad
        lprobs[:, self.bos] = -math.inf  # never select bos
        # handle min and max length constraints
        if step >= max_len:
            # at the length limit only EOS may be generated
            lprobs[:, :self.eos] = -math.inf
            lprobs[:, self.eos + 1:] = -math.inf
        elif step < min_len:
            lprobs[:, self.eos] = -math.inf
        # (fairseq's prefix-token forcing was removed here; see fairseq's
        # sequence_generator for the original logic.)
        if self.no_repeat_ngram_size > 0:
            # for each beam and batch sentence, generate a list of previous ngrams
            gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)]
            for bbsz_idx in range(bsz * beam_size):
                gen_tokens = tokens[bbsz_idx].tolist()
                for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]):
                    gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \
                        gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]]
        # Record attention scores (never taken while avg_attn_scores is None)
        if avg_attn_scores is not None:
            if attn is None:
                attn = scores.new(bsz * beam_size, src_tokens.size(1), max_len + 2)
                attn_buf = attn.clone()
            attn[:, :, step + 1].copy_(avg_attn_scores)
        scores = scores.type_as(lprobs)
        scores_buf = scores_buf.type_as(lprobs)
        eos_bbsz_idx = buffer('eos_bbsz_idx')
        eos_scores = buffer('eos_scores', type_of=scores)
        if self.no_repeat_ngram_size > 0:
            def calculate_banned_tokens(bbsz_idx):
                # before decoding the next token, prevent decoding of ngrams that have already appeared
                ngram_index = tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist())
                return gen_ngrams[bbsz_idx].get(ngram_index, [])

            if step + 2 - self.no_repeat_ngram_size >= 0:
                # no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
                banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)]
            else:
                banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)]
            for bbsz_idx in range(bsz * beam_size):
                lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf
        cand_scores, cand_indices, cand_beams = self.search.step(
            step,
            lprobs.view(bsz, -1, self.vocab_size),
            scores.view(bsz, beam_size, -1)[:, :, :step],
        )
        # cand_bbsz_idx contains beam indices for the top candidate
        # hypotheses, with a range of values: [0, bsz*beam_size),
        # and dimensions: [bsz, cand_size]
        cand_bbsz_idx = cand_beams.add(bbsz_offsets)
        # finalize hypotheses that end in eos (except for blacklisted ones)
        eos_mask = cand_indices.eq(self.eos)
        eos_mask[:, :beam_size][blacklist] = 0
        # only consider eos when it's among the top beam_size indices
        torch.masked_select(
            cand_bbsz_idx[:, :beam_size],
            mask=eos_mask[:, :beam_size],
            out=eos_bbsz_idx,
        )
        finalized_sents = set()
        if eos_bbsz_idx.numel() > 0:
            torch.masked_select(
                cand_scores[:, :beam_size],
                mask=eos_mask[:, :beam_size],
                out=eos_scores,
            )
            finalized_sents = finalize_hypos(step, eos_bbsz_idx, eos_scores)
            num_remaining_sent -= len(finalized_sents)
        assert num_remaining_sent >= 0
        if num_remaining_sent == 0:
            break
        assert step < max_len
        if len(finalized_sents) > 0:
            # shrink every per-batch tensor to the sentences still running
            new_bsz = bsz - len(finalized_sents)
            # construct batch_idxs which holds indices of batches to keep for the next pass
            batch_mask = cand_indices.new_ones(bsz)
            batch_mask[cand_indices.new(finalized_sents)] = 0
            batch_idxs = batch_mask.nonzero().squeeze(-1)
            eos_mask = eos_mask[batch_idxs]
            cand_beams = cand_beams[batch_idxs]
            bbsz_offsets.resize_(new_bsz, 1)
            cand_bbsz_idx = cand_beams.add(bbsz_offsets)
            cand_scores = cand_scores[batch_idxs]
            cand_indices = cand_indices[batch_idxs]
            src_lengths = src_lengths[batch_idxs]
            blacklist = blacklist[batch_idxs]
            scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
            scores_buf.resize_as_(scores)
            tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
            tokens_buf.resize_as_(tokens)
            if attn is not None:
                attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1)
                attn_buf.resize_as_(attn)
            bsz = new_bsz
        else:
            batch_idxs = None
        # Set active_mask so that values > cand_size indicate eos or
        # blacklisted hypos and values < cand_size indicate candidate
        # active hypos. After this, the min values per row are the top
        # candidate active hypos.
        active_mask = buffer('active_mask')
        eos_mask[:, :beam_size] |= blacklist
        torch.add(
            eos_mask.type_as(cand_offsets) * cand_size,
            cand_offsets[:eos_mask.size(1)],
            out=active_mask,
        )
        # get the top beam_size active hypotheses, which are just the hypos
        # with the smallest values in active_mask
        active_hypos, new_blacklist = buffer('active_hypos'), buffer('new_blacklist')
        torch.topk(
            active_mask, k=beam_size, dim=1, largest=False,
            out=(new_blacklist, active_hypos)
        )
        # update blacklist to ignore any finalized hypos
        blacklist = new_blacklist.ge(cand_size)[:, :beam_size]
        assert (~blacklist).any(dim=1).all()
        active_bbsz_idx = buffer('active_bbsz_idx')
        torch.gather(
            cand_bbsz_idx, dim=1, index=active_hypos,
            out=active_bbsz_idx,
        )
        active_scores = torch.gather(
            cand_scores, dim=1, index=active_hypos,
            out=scores[:, step].view(bsz, beam_size),
        )
        active_bbsz_idx = active_bbsz_idx.view(-1)
        active_scores = active_scores.view(-1)
        # copy tokens and scores for active hypotheses
        torch.index_select(
            tokens[:, :step + 1], dim=0, index=active_bbsz_idx,
            out=tokens_buf[:, :step + 1],
        )
        torch.gather(
            cand_indices, dim=1, index=active_hypos,
            out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1],
        )
        if step > 0:
            torch.index_select(
                scores[:, :step], dim=0, index=active_bbsz_idx,
                out=scores_buf[:, :step],
            )
        torch.gather(
            cand_scores, dim=1, index=active_hypos,
            out=scores_buf.view(bsz, beam_size, -1)[:, :, step],
        )
        # copy attention for active hypotheses
        if attn is not None:
            torch.index_select(
                attn[:, :, :step + 2], dim=0, index=active_bbsz_idx,
                out=attn_buf[:, :, :step + 2],
            )
        # swap buffers
        tokens, tokens_buf = tokens_buf, tokens
        scores, scores_buf = scores_buf, scores
        if attn is not None:
            attn, attn_buf = attn_buf, attn
        # reorder incremental state in decoder
        reorder_state = active_bbsz_idx
    # sort by score descending
    for sent in range(len(finalized)):
        finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)
    # self.decoder_states = defaultdict(lambda : None)
    return finalized, gold_scores, gold_words, allgold_scores
def _decode(self, tokens, decoder_states):
    """One ensemble decoding step in streaming mode.

    Feeds ``tokens`` (batch-first) through every model with its cached
    incremental state (``streaming=True``), combines the per-model
    log-probabilities, and returns ``(combined_log_prob, None)`` — the
    attention slot is kept only for interface compatibility.
    """
    log_probs = {}
    coverages = {}
    for idx in range(self.n_models):
        # streaming=True keeps/extends the model's incremental state
        step_out = self.models[idx].step(tokens, decoder_states[idx], streaming=True)
        log_probs[idx] = step_out['log_prob']
        coverages[idx] = step_out['coverage']
    combined = self._combine_outputs(log_probs)
    # The combined attention is computed but deliberately discarded:
    # callers never consume it, so None is returned in its place.
    self._combine_attention(coverages)
    return combined, None
def translate(self, src_data, tgt_data, type='mt'):
    """Translate raw sentences: build a batch, beam-search it, and
    return token-level hypotheses plus their scores.

    Returns ``(pred_batch, pred_score, pred_length, gold_score,
    gold_words, allgold_words)`` where ``pred_batch[b][n]`` is the n-th
    best token sequence for sentence ``b``.  ``pred_length`` is always
    an empty list.
    """
    # (1) words -> indices; the dataset built here holds one mini-batch
    dataset = self.build_data(src_data, tgt_data, type=type)
    batch = dataset.next()[0]
    if self.cuda:
        batch.cuda(fp16=self.fp16)
    batch_size = batch.size
    # (2) beam search
    finalized, gold_score, gold_words, allgold_words = self.translateBatch(batch)
    pred_length = []
    # (3) indices -> tokens for the n best hypotheses of every sentence
    n_best = self.opt.n_best
    pred_batch = [
        [self.build_target_tokens(finalized[b][n]['tokens'], src_data[b], None)
         for n in range(n_best)]
        for b in range(batch_size)
    ]
    pred_score = [
        [torch.FloatTensor([finalized[b][n]['score']]) for n in range(n_best)]
        for b in range(batch_size)
    ]
    return pred_batch, pred_score, pred_length, gold_score, gold_words, allgold_words
| 22,096 | 41.250478 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/inference/Beam.py | from __future__ import division
import torch
import onmt
"""
Class for managing the internals of the beam search process.
hyp1-hyp1---hyp1 -hyp1
\ /
hyp2 \-hyp2 /-hyp2hyp2
/ \
hyp3-hyp3---hyp3 -hyp3
========================
Takes care of beams, back pointers, and scores.
"""
class Beam(object):
    """State of a single sentence's beam search.

    Keeps, per time-step, the chosen tokens (``nextYs``), backpointers
    into the previous beam (``prevKs``), attention rows (``attn``) and
    cumulative scores, and reconstructs full hypotheses by walking the
    backpointers (``getHyp``).
    """

    def __init__(self, size, bos_id, cuda=False, sampling=False):
        # Beam width; collapses to 1 when sampling instead of searching.
        self.size = size
        self.done = False
        if sampling:
            self.size = 1
        # NOTE(review): the tensors below are still allocated with the
        # original `size` argument even when sampling shrinks self.size
        # to 1 — confirm this is intended.
        self.sampling = sampling
        self.tt = torch.cuda if cuda else torch
        # The score for each translation on the beam.
        self.scores = self.tt.FloatTensor(size).zero_()
        self.allScores = []
        # The backpointers at each time-step.
        self.prevKs = []
        # The outputs at each time-step; slot 0 of the first step is BOS,
        # the rest of the beam starts as target padding.
        self.nextYs = [self.tt.LongTensor(size).fill_(onmt.constants.TGT_PAD)]
        self.nextYs[0][0] = bos_id
        # The attentions (matrix) for each time.
        self.attn = []

    def getCurrentState(self):
        "Get the outputs for the current timestep."
        return self.nextYs[-1]

    def getCurrentOrigin(self):
        "Get the backpointers for the current timestep."
        return self.prevKs[-1]

    def advance(self, wordLk, attnOut):
        """
        Given prob over words for every last beam `wordLk` and attention
        `attnOut`: Compute and update the beam search.

        Parameters:
        * `wordLk`- probs of advancing from the last step (K x words)
        * `attnOut`- attention at the last step

        Returns: True if beam search is complete.
        """
        numWords = wordLk.size(1)
        # Sum the previous scores.
        if len(self.prevKs) > 0:
            beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk)
        else:
            # First step: all beams are identical, so only row 0 is used.
            beamLk = wordLk[0]
        flatBeamLk = beamLk.view(-1)
        if not self.sampling:
            bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True)
        else:
            # because wordLk is log prob, exp to get distribution
            probs = torch.exp(wordLk)
            bestScoresId = torch.multinomial(probs, 1).squeeze(1)  # K x 1 to K
            bestScores = flatBeamLk[bestScoresId]
        self.allScores.append(self.scores)
        self.scores = bestScores
        # bestScoresId is a flattened beam x word array, so recover which
        # beam (backpointer) and which word each selected score came from
        prevK = bestScoresId.floor_divide(numWords)
        self.prevKs.append(prevK)
        self.nextYs.append(bestScoresId - prevK * numWords)
        self.attn.append(attnOut.index_select(0, prevK))
        # End condition is when top-of-beam is EOS.
        if self.nextYs[-1][0] == onmt.constants.EOS:
            self.done = True
            self.allScores.append(self.scores)
        return self.done

    def sortBest(self):
        # Sort beam entries by cumulative score, best first.
        return torch.sort(self.scores, 0, True)

    def getBest(self):
        "Get the score of the best in the beam."
        scores, ids = self.sortBest()
        # NOTE(review): index [1] picks the *second* entry of the sorted
        # scores; this mirrors legacy OpenNMT code but looks off-by-one —
        # confirm [0] was not intended.
        return scores[1], ids[1]

    def getHyp(self, k):
        """
        Walk back to construct the full hypothesis.

        Parameters.
        * `k` - the position in the beam to construct.

        Returns: the hypothesis tokens, the attention at each time step,
        and the hypothesis length.
        """
        hyp, attn = [], []
        lengths = []
        for j in range(len(self.prevKs) - 1, -1, -1):
            hyp.append(self.nextYs[j+1][k])
            attn.append(self.attn[j][k])
            # follow the backpointer into the previous time-step's beam
            k = self.prevKs[j][k]
        length = len(hyp)
        return hyp[::-1], torch.stack(attn[::-1]), length
| 4,071 | 28.085714 | 80 | py |
NMTGMinor | NMTGMinor-master/onmt/inference/predictor.py | import onmt
import onmt.modules
import torch
from onmt.model_factory import build_classifier
from ae.Autoencoder import Autoencoder
import torch.nn.functional as F
import sys
from onmt.constants import add_tokenidx
from options import backward_compatible
model_list = ['transformer', 'stochastic_transformer', 'fusion_network']
class Predictor(object):
def __init__(self, opt):
    """Load one or more classifier checkpoints (``opt.model``, '|'-separated)
    and prepare them for inference.

    Side effects: populates ``self.models`` / ``self.model_types``, the
    source/target dictionaries from the first checkpoint, optionally a
    language model (``opt.lm``) and an autoencoder (``opt.autoencoder``),
    and moves everything to GPU/fp16/quantized form per ``opt``.
    """
    self.opt = opt
    self.tt = torch.cuda if opt.cuda else torch
    self.fp16 = opt.fp16
    self.attributes = opt.attributes  # attributes split by |. for example: de|domain1
    self.src_lang = opt.src_lang
    self.tgt_lang = opt.tgt_lang
    if self.attributes:
        self.attributes = self.attributes.split("|")
    self.models = list()
    self.model_types = list()
    # models are string with | as delimiter
    models = opt.model.split("|")
    print(models)
    self.n_models = len(models)
    self._type = 'text'
    for i, model_path in enumerate(models):
        # load onto CPU first; device placement happens below
        checkpoint = torch.load(model_path,
                                map_location=lambda storage, loc: storage)
        model_opt = checkpoint['opt']
        model_opt = backward_compatible(model_opt)
        if hasattr(model_opt, "enc_state_dict"):
            # drop stale nested state dicts carried inside the options
            model_opt.enc_state_dict = None
            model_opt.dec_state_dict = None
        self.main_model_opt = model_opt
        dicts = checkpoint['dicts']
        # update special tokens
        onmt.constants = add_tokenidx(model_opt, onmt.constants, dicts)
        self.bos_token = model_opt.tgt_bos_word
        if i == 0:
            # dictionaries come from the first checkpoint only
            if "src" in checkpoint['dicts']:
                self.src_dict = checkpoint['dicts']['src']
            else:
                # no source dictionary means audio (ASR) input
                self._type = "audio"
            self.tgt_dict = checkpoint['dicts']['tgt']
            print(self.tgt_dict.idxToLabel)
            if "langs" in checkpoint["dicts"]:
                self.lang_dict = checkpoint['dicts']['langs']
            else:
                self.lang_dict = {'src': 0, 'tgt': 1}
        model = build_classifier(model_opt, checkpoint['dicts'])
        if opt.verbose:
            print('Loading model from %s' % model_path)
        model.load_state_dict(checkpoint['model'])
        if model_opt.model in model_list:
            # make sure positional buffers cover the maximum decode length
            model.renew_buffer(self.opt.max_sent_length)
        if opt.fp16:
            model = model.half()
        if opt.cuda:
            model = model.cuda()
        else:
            model = model.cpu()
        if opt.dynamic_quantile == 1:
            # dynamic int8 quantization for CPU inference
            engines = torch.backends.quantized.supported_engines
            if 'fbgemm' in engines:
                torch.backends.quantized.engine = 'fbgemm'
            else:
                print("[INFO] fbgemm is not found in the available engines. Possibly the CPU does not support AVX2."
                      " It is recommended to disable Quantization (set to 0).")
                torch.backends.quantized.engine = 'qnnpack'
            # convert the custom functions to their autograd equivalent first
            model.convert_autograd()
            model = torch.quantization.quantize_dynamic(
                model, {torch.nn.LSTM, torch.nn.Linear}, dtype=torch.qint8
            )
        model.eval()
        self.models.append(model)
        self.model_types.append(model_opt.model)
    # language model
    if opt.lm is not None:
        if opt.verbose:
            print('Loading language model from %s' % opt.lm)
        lm_chkpoint = torch.load(opt.lm, map_location=lambda storage, loc: storage)
        lm_opt = lm_chkpoint['opt']
        # NOTE(review): build_language_model is never imported in this
        # module, so this branch raises NameError when opt.lm is set —
        # likely needs `from onmt.model_factory import build_language_model`.
        # Also note the dicts come from the last *translation* checkpoint,
        # not from lm_chkpoint — confirm that is intended.
        lm_model = build_language_model(lm_opt, checkpoint['dicts'])
        if opt.fp16:
            lm_model = lm_model.half()
        if opt.cuda:
            lm_model = lm_model.cuda()
        else:
            lm_model = lm_model.cpu()
        self.lm_model = lm_model
    self.cuda = opt.cuda
    self.ensemble_op = opt.ensemble_op
    if opt.autoencoder is not None:
        if opt.verbose:
            print('Loading autoencoder from %s' % opt.autoencoder)
        # NOTE: `checkpoint`/`model_opt` are rebound here; the loop
        # variables above are no longer valid past this point.
        checkpoint = torch.load(opt.autoencoder,
                                map_location=lambda storage, loc: storage)
        model_opt = checkpoint['opt']
        # Build model from the saved option
        self.autoencoder = Autoencoder(self.models[0], model_opt)
        self.autoencoder.load_state_dict(checkpoint['autoencoder'])
        if opt.cuda:
            self.autoencoder = self.autoencoder.cuda()
            self.models[0] = self.models[0].cuda()
        else:
            self.autoencoder = self.autoencoder.cpu()
            self.models[0] = self.models[0].cpu()
        self.models[0].autoencoder = self.autoencoder
    if opt.verbose:
        print('Done')
def build_asr_data(self, src_data, tgt_sents):
    """Wrap ASR features (and optional gold targets) in a Dataset.

    Target sentences, when given, are mapped to indices with BOS/EOS
    markers added — mirroring preprocess.py.
    """
    indexed_tgt = None
    if tgt_sents:
        indexed_tgt = [
            self.tgt_dict.convertToIdx(sent,
                                       onmt.constants.UNK_WORD,
                                       onmt.constants.BOS_WORD,
                                       onmt.constants.EOS_WORD)
            for sent in tgt_sents
        ]
    # batch_size_words is unbounded: batching is controlled by sentence count
    return onmt.Dataset(src_data, indexed_tgt,
                        batch_size_words=sys.maxsize,
                        data_type=self._type, batch_size_sents=self.opt.batch_size)
def classify_batch(self, batches, sub_batches=None):
with torch.no_grad():
return self._classify_batch(batches, sub_batches=sub_batches)
def _classify_batch(self, batches, sub_batches):
batch = batches[0]
beam_size = self.opt.beam_size
bsz = batch_size = batch.size
# require batch first for everything
outs = dict()
attns = dict()
for i in range(self.n_models):
# decoder output contains the log-prob distribution of the next step
# decoder_output = self.models[i].step(tokens, decoder_states[i])
model_outputs = self.models[i](batches[i])
logits = model_outputs['logits']
mask = model_outputs['src_mask']
mask = mask.squeeze(1).transpose(0, 1)
mask = mask.unsqueeze(-1)
logits.masked_fill_(mask, 0)
lengths = (1 - mask.long()).squeeze(-1).sum(dim=0, keepdim=False)
clean_logits = logits.sum(dim=0, keepdim=False).div(lengths.unsqueeze(-1))
probs = F.softmax(clean_logits.float(), dim=-1)
outs[i] = probs
probs = sum(outs.values())
probs.div_(self.n_models)
return probs
def build_data(self, src_sents, tgt_sents, type='mt', past_sents=None):
    """Build a single-shot Dataset for classification.

    Only ``type='asr'`` is supported; 'mt' (and anything else) raises
    NotImplementedError.  Gold targets, when given, are indexed with an
    optional BOS (suppressed via ``opt.no_bos_gold``) and EOS.
    """
    if type != 'asr':
        # text ('mt') input is not wired up for the classifier
        raise NotImplementedError
    # ASR features are used as-is; no dictionary lookup needed
    src_data = src_sents
    past_src_data = past_sents
    data_type = 'audio'
    tgt_bos_word = self.opt.bos_token
    if self.opt.no_bos_gold:
        tgt_bos_word = None
    tgt_data = None
    if tgt_sents:
        tgt_data = [self.tgt_dict.convertToIdx(sent,
                                               onmt.constants.UNK_WORD,
                                               tgt_bos_word,
                                               onmt.constants.EOS_WORD)
                    for sent in tgt_sents]
    # language-embedding indices: only the source language is supplied
    src_lang_data = [torch.Tensor([self.lang_dict[self.src_lang]])]
    tgt_lang_data = None
    return onmt.Dataset(src_data, tgt_data,
                        src_langs=src_lang_data, tgt_langs=tgt_lang_data,
                        batch_size_words=sys.maxsize,
                        data_type=data_type,
                        batch_size_sents=self.opt.batch_size,
                        src_align_right=self.opt.src_align_right,
                        past_src_data=past_src_data)
def predict(self, src_data):
type = 'asr'
# (1) convert words to indexes
if isinstance(src_data[0], list) and type == 'asr':
batches = list()
for i, src_data_ in enumerate(src_data):
dataset = self.build_data(src_data_, None, type=type, past_sents=None)
batch = dataset.get_batch(0)
batches.append(batch)
else:
dataset = self.build_data(src_data, None, type=type)
batch = dataset.get_batch(0) # this dataset has only one mini-batch
batches = [batch] * self.n_models
src_data = [src_data] * self.n_models
batch_size = batches[0].size
if self.cuda:
for i, _ in enumerate(batches):
batches[i].cuda(fp16=self.fp16)
# (2) translate
# each model in the ensemble uses one batch in batches
probs = self.classify_batch(batches)
# (3) convert indexes to words
pred_score = []
for b in range(batch_size):
pred_score.append(
probs[b].tolist()
)
return pred_score
| 10,872 | 35.609428 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/inference/global_translator.py | import onmt
import onmt.modules
import torch.nn as nn
import torch
import math
from onmt.model_factory import build_model
import torch.nn.functional as F
from onmt.inference.search import BeamSearch, DiverseBeamSearch
from onmt.inference.translator import Translator
from collections import defaultdict
class GlobalStreamTranslator(Translator):
"""
A fast implementation of the Beam Search based translator
Based on Fairseq implementation
"""
def __init__(self, opt):
    """Build a global streaming beam-search translator.

    Like the plain streaming translator, but additionally carries
    cumulative per-beam scores and lengths across streamed segments
    (``prev_scores`` / ``prev_lengths``).
    """
    super().__init__(opt)
    self.search = BeamSearch(self.tgt_dict)
    # Special-token ids used throughout the search.
    self.eos = onmt.constants.EOS
    self.pad = onmt.constants.PAD
    self.bos = self.bos_id
    self.vocab_size = self.tgt_dict.size()
    self.min_len = 1
    self.normalize_scores = opt.normalize
    self.len_penalty = opt.alpha
    # One incremental decoder state per model, created lazily.
    self.decoder_states = defaultdict(lambda: None)
    # carried across segments: cumulative score and length per beam
    self.prev_scores = torch.Tensor(self.opt.beam_size).fill_(0)
    self.prev_lengths = torch.LongTensor(self.opt.beam_size).fill_(0)
    # Optional options: fall back to defaults when absent on opt.
    self.no_repeat_ngram_size = getattr(opt, 'no_repeat_ngram_size', 0)
    self.dynamic_max_len = getattr(opt, 'dynamic_max_len', False)
    self.dynamic_max_len_scale = getattr(opt, 'dynamic_max_len_scale', 1.2)
    self.dynamic_min_len_scale = getattr(opt, 'dynamic_min_len_scale', 0.8)
    if opt.verbose:
        print('* Current bos id: %d' % self.bos_id, onmt.constants.BOS)
        print('* Using fast beam search implementation')
    self.max_memory_size = opt.max_memory_size
    for model in self.models:
        model.set_memory_size(self.max_memory_size, self.max_memory_size)
def reset_stream(self):
    """Drop all cached incremental decoder states, starting a fresh stream."""
    self.decoder_states = defaultdict(lambda: None)
def translateBatch(self, batch):
    """Translate one batch with autograd disabled; see ``_translateBatch``."""
    with torch.no_grad():
        result = self._translateBatch(batch)
    return result
def _translateBatch(self, batch):
# Batch size is in different location depending on data.
beam_size = self.opt.beam_size
bsz = batch_size = batch.size
max_len = self.opt.max_sent_length
gold_scores = batch.get('source').data.new(batch_size).float().zero_()
gold_words = 0
allgold_scores = []
if batch.has_target:
# Use the first model to decode
model_ = self.models[0]
gold_words, gold_scores, allgold_scores = model_.decode(batch)
# (3) Start decoding
# initialize buffers
src = batch.get('source')
scores = src.new(bsz * beam_size, max_len + 1).float().fill_(0)
self.prev_scores = self.prev_scores.type_as(scores)
self.prev_lengths = self.prev_lengths.to(scores.device)
scores_buf = scores.clone()
tokens = src.new(bsz * beam_size, max_len + 2).long().fill_(self.pad)
beams = src.new(bsz * beam_size, max_len + 2).long().fill_(self.pad)
tokens_buf = tokens.clone()
beams_buf = beams.clone()
tokens[:, 0].fill_(self.bos) # first token is bos
beams[:, 0].fill_(0) # first one is the same ...
attn, attn_buf = None, None
nonpad_idxs = None
src_tokens = src.transpose(0, 1) # batch x time
src_lengths = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
blacklist = src_tokens.new_zeros(bsz, beam_size).eq(-1) # forward and backward-compatible False mask
prefix_tokens = None
# list of completed sentences
finalized = [[] for i in range(bsz)]
finished = [False for i in range(bsz)]
num_remaining_sent = bsz
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
cand_offsets = torch.arange(0, cand_size).type_as(tokens)
# helper function for allocating buffers on the fly
buffers = {}
def buffer(name, type_of=tokens): # noqa
if name not in buffers:
buffers[name] = type_of.new()
return buffers[name]
def is_finished(sent, step, unfinalized_scores=None):
"""
Check whether we've finished generation for a given sentence, by
comparing the worst score among finalized hypotheses to the best
possible score among unfinalized hypotheses.
"""
assert len(finalized[sent]) <= beam_size
if len(finalized[sent]) == beam_size:
return True
return False
def finalize_hypos(step, bbsz_idx, eos_scores):
"""
Finalize the given hypotheses at this step, while keeping the total
number of finalized hypotheses per sentence <= beam_size.
Note: the input must be in the desired finalization order, so that
hypotheses that appear earlier in the input are preferred to those
that appear later.
Args:
step: current time step
bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
indicating which hypotheses to finalize
eos_scores: A vector of the same size as bbsz_idx containing
scores for each hypothesis
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors
tokens_clone = tokens.index_select(0, bbsz_idx)
beams_clone = beams.index_select(0, bbsz_idx)
prev_lengths = self.prev_lengths.index_select(0, bbsz_idx)
tokens_clone = tokens_clone[:, 1:step + 2] # skip the first index, which is EOS
beams_clone = beams_clone[:, 0:step + 2]
assert not tokens_clone.eq(self.eos).any()
tokens_clone[:, step] = self.eos
attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step + 2] if attn is not None else None
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, :step + 1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
raw_scores = eos_scores.clone()
# normalize sentence-level scores
if self.normalize_scores:
eos_scores /= (step + 1 + prev_lengths) ** self.len_penalty
cum_unfin = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
sents_seen = set()
assert len(self.decoder_states) == 1
beam_buffers = self.decoder_states[0].get_beam_buffer(bbsz_idx)
for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), eos_scores.tolist())):
unfin_idx = idx // beam_size
sent = unfin_idx + cum_unfin[unfin_idx]
# looks like sent and unfin_idx are both 0 when batch_size is 1 ...
# until everything is finished
sents_seen.add((sent, unfin_idx))
def get_buffer():
buffer = dict()
for l in beam_buffers:
buffer[l] = dict()
# take that state
for key in beam_buffers[l]:
buffer[l][key] = beam_buffers[l][key][:, i, :].unsqueeze(1)
return buffer
def get_hypo():
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i]
else:
hypo_attn = None
return {
'tokens': tokens_clone[i],
'score': score,
'attention': hypo_attn, # src_len x tgt_len
'alignment': None,
'positional_scores': pos_scores[i],
'hidden_buffer': get_buffer(),
'raw_score': raw_scores[i]
}
if len(finalized[sent]) < beam_size:
finalized[sent].append(get_hypo())
newly_finished = []
for sent, unfin_idx in sents_seen:
# check termination conditions for this sentence
if not finished[sent] and is_finished(sent, step, unfin_idx):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
reorder_state = None
batch_idxs = None
# initialize the decoder state, including:
# - expanding the context over the batch dimension len_src x (B*beam) x H
# - expanding the mask over the batch dimension (B*beam) x len_src
for i in range(self.n_models):
# decoder_states[i] = self.models[i].create_decoder_state(batch, beam_size, type=2, streaming=False)
self.decoder_states[i] = self.models[i].create_decoder_state(batch, beam_size,
previous_decoding_state=self.decoder_states[i],
streaming=True)
if self.dynamic_max_len:
src_len = src.size(0)
max_len = min(math.ceil(int(src_len) * self.dynamic_max_len_scale), self.opt.max_sent_length)
min_len = math.ceil(int(src_len) * self.dynamic_min_len_scale)
else:
min_len = self.min_len
# Start decoding
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs)
reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)
for i, model in enumerate(self.models):
self.decoder_states[i]._reorder_incremental_state(reorder_state)
decode_input = tokens[:, :step + 1]
# lprobs size: [batch x beam x vocab_size]
lprobs, avg_attn_scores = self._decode(decode_input, self.decoder_states)
avg_attn_scores = None
lprobs[:, self.pad] = -math.inf # never select pad
lprobs[:, self.bos] = -math.inf # never select bos ...
# handle min and max length constraints
if step >= max_len:
lprobs[:, :self.eos] = -math.inf
lprobs[:, self.eos + 1:] = -math.inf
elif step < min_len:
lprobs[:, self.eos] = -math.inf
# handle prefix tokens (possibly with different lengths)
# if prefix_tokens is not None and step < prefix_tokens.size(1):
# prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
# prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
# prefix_mask = prefix_toks.ne(self.pad)
# lprobs[prefix_mask] = -math.inf
# lprobs[prefix_mask] = lprobs[prefix_mask].scatter_(
# -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs
# )
# # if prefix includes eos, then we should make sure tokens and
# # scores are the same across all beams
# eos_mask = prefix_toks.eq(self.eos)
# if eos_mask.any():
# # validate that the first beam matches the prefix
# first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[:, 0, 1:step + 1]
# eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
# target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
# assert (first_beam == target_prefix).all()
#
# def replicate_first_beam(tensor, mask):
# tensor = tensor.view(-1, beam_size, tensor.size(-1))
# tensor[mask] = tensor[mask][:, :1, :]
# return tensor.view(-1, tensor.size(-1))
#
# # copy tokens, scores and lprobs from the first beam to all beams
# tokens = replicate_first_beam(tokens, eos_mask_batch_dim)
# scores = replicate_first_beam(scores, eos_mask_batch_dim)
# lprobs = replicate_first_beam(lprobs, eos_mask_batch_dim)
if self.no_repeat_ngram_size > 0:
# for each beam and batch sentence, generate a list of previous ngrams
gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
gen_tokens = tokens[bbsz_idx].tolist()
for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]):
gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \
gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]]
# Record attention scores
if avg_attn_scores is not None:
if attn is None:
attn = scores.new(bsz * beam_size, src_tokens.size(1), max_len + 2)
attn_buf = attn.clone()
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
scores_buf = scores_buf.type_as(lprobs)
eos_bbsz_idx = buffer('eos_bbsz_idx')
eos_scores = buffer('eos_scores', type_of=scores)
if self.no_repeat_ngram_size > 0:
def calculate_banned_tokens(bbsz_idx):
# before decoding the next token, prevent decoding of ngrams that have already appeared
ngram_index = tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist())
return gen_ngrams[bbsz_idx].get(ngram_index, [])
if step + 2 - self.no_repeat_ngram_size >= 0:
# no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)]
else:
banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
initial_score=self.prev_scores
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
# when bsz = 1, cand_bbsz_idx is not different than cand_beams
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos (except for blacklisted ones)
eos_mask = cand_indices.eq(self.eos)
eos_mask[:, :beam_size][blacklist] = 0
# only consider eos when it's among the top beam_size indices
torch.masked_select(
cand_bbsz_idx[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_bbsz_idx,
)
# so: cand_bbsz_idx is a list of beam indices
# eos_bbsz_idx in the case of batch_size 1: a list of beam_indices in which the eos is reached
finalized_sents = set()
if eos_bbsz_idx.numel() > 0:
torch.masked_select(
cand_scores[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_scores,
)
finalized_sents = finalize_hypos(step, eos_bbsz_idx, eos_scores)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
assert step < max_len
# if batch size == 1 then this block will not be touched
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = cand_indices.new_ones(bsz)
batch_mask[cand_indices.new(finalized_sents)] = 0
batch_idxs = batch_mask.nonzero().squeeze(-1)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
# if prefix_tokens is not None:
# prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
blacklist = blacklist[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
scores_buf.resize_as_(scores)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens_buf.resize_as_(tokens)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1)
attn_buf.resize_as_(attn)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos or
# blacklisted hypos and values < cand_size indicate candidate
# active hypos. After this, the min values per row are the top
# candidate active hypos.
active_mask = buffer('active_mask')
eos_mask[:, :beam_size] |= blacklist
torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[:eos_mask.size(1)],
out=active_mask,
)
# get the top beam_size active hypotheses, which are just the hypos
# with the smallest values in active_mask
active_hypos, new_blacklist = buffer('active_hypos'), buffer('new_blacklist')
torch.topk(
active_mask, k=beam_size, dim=1, largest=False,
out=(new_blacklist, active_hypos)
)
# update blacklist to ignore any finalized hypos
blacklist = new_blacklist.ge(cand_size)[:, :beam_size]
assert (~blacklist).any(dim=1).all()
active_bbsz_idx = buffer('active_bbsz_idx')
torch.gather(
cand_bbsz_idx, dim=1, index=active_hypos,
out=active_bbsz_idx,
)
active_scores = torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores[:, step].view(bsz, beam_size),
)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
torch.index_select(
tokens[:, :step + 1], dim=0, index=active_bbsz_idx,
out=tokens_buf[:, :step + 1],
)
torch.index_select(
beams[:, :step + 1], dim=0, index=active_bbsz_idx,
out=beams_buf[:, step + 1],
)
# add the cand_indices (words) into the token buffer of the last step
torch.gather(
cand_indices, dim=1, index=active_hypos,
out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1],
)
torch.gather(
cand_bbsz_idx, dim=1, index=active_hypos,
out=beams_buf.view(bsz, beam_size, -1)[:, :, step + 1],
)
# print(cand_indices.size(), cand_bbsz_idx.size())
if step > 0:
torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx,
out=scores_buf[:, :step],
)
torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores_buf.view(bsz, beam_size, -1)[:, :, step],
)
# copy attention for active hypotheses
if attn is not None:
torch.index_select(
attn[:, :, :step + 2], dim=0, index=active_bbsz_idx,
out=attn_buf[:, :, :step + 2],
)
# swap buffers
tokens, tokens_buf = tokens_buf, tokens
scores, scores_buf = scores_buf, scores
beams, beams_buf = beams_buf, beams
if attn is not None:
attn, attn_buf = attn_buf, attn
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
# Re-encoding step
# for beam in range(self.opt.beam_size):
# " batch size = 1 "
# tensor = finalized[0][beam]['tokens']
# words = " ".join(self.tgt_dict.convertToLabels(tensor, onmt.constants.EOS, including_stop=False))
# beam_org = finalized[0][beam]['beam_origin']
# print(beam_org, words)
for sent in range(len(finalized)):
finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)
for sent in range(len(finalized)):
for beam in range(len(finalized[sent])):
tensor = finalized[sent][beam]['tokens']
words = self.tgt_dict.convertToLabels(tensor, onmt.constants.EOS, including_stop=False)
n_words = len(words)
buffer_state = finalized[sent][beam]['hidden_buffer']
sentence = " ".join(words)
# self.prev_scores[beam].fill_(finalized[sent][beam]['raw_score'])
# self.prev_lengths[beam].fill_(n_words + 2)
# assign the buffers to the decoder_states
# at this point, we need to somehow make zero padding
self.decoder_states[sent].set_beam_buffer(finalized[sent])
# self.decoder_states = defaultdict(lambda: None)
# Should we do it before sorting, or after sorting
# Step 1: revert the memory of the decoder to the starting point
# Done. they are the buffer_state
# Step 3: Re-select the buffer (
# print(tensor)
return finalized, gold_scores, gold_words, allgold_scores
def _decode(self, tokens, decoder_states):
    """Run one decoding step for each ensemble member and combine the results.

    :param tokens: previously generated target tokens (batch-first)
    :param decoder_states: per-model decoder states, indexed 0 .. n_models-1
    :return: tuple (combined log-probabilities, attention); attention is
        always None in this implementation.
    """
    log_probs = dict()
    coverages = dict()
    for model_id in range(self.n_models):
        # streaming=True: the model consumes only the newest token internally
        step_output = self.models[model_id].step(tokens, decoder_states[model_id], streaming=True)
        # per-model outputs: log-probabilities over the vocabulary and coverage
        log_probs[model_id] = step_output['log_prob']
        coverages[model_id] = step_output['coverage']
    combined_out = self._combine_outputs(log_probs)
    # the combined attention is computed but deliberately discarded below
    _ = self._combine_attention(coverages)
    return combined_out, None
def translate(self, src_data, tgt_data, type='mt'):
    """Translate raw source data and return n-best hypotheses with scores.

    :param src_data: list of tokenized source sentences
    :param tgt_data: list of tokenized target sentences (may be used for
        gold scoring) or None
    :param type: dataset type passed through to build_data (default 'mt')
    :return: (pred_batch, pred_score, pred_length, gold_score, gold_words,
        allgold_words) -- pred_length is always an empty list here.
    """
    # (1) convert words to indexes
    dataset = self.build_data(src_data, tgt_data, type=type)
    batch = dataset.next()[0]
    if self.cuda:
        batch.cuda(fp16=self.fp16)
    # ~ batch = self.to_variable(dataset.next()[0])
    batch_size = batch.size
    # (2) translate
    finalized, gold_score, gold_words, allgold_words = self.translateBatch(batch)
    # lengths are not tracked by this implementation
    pred_length = []
    # (3) convert indexes to words, keeping the opt.n_best best hypotheses
    pred_batch = []
    for b in range(batch_size):
        pred_batch.append(
            [self.build_target_tokens(finalized[b][n]['tokens'], src_data[b], None)
             for n in range(self.opt.n_best)]
        )
    # one FloatTensor score per kept hypothesis
    pred_score = []
    for b in range(batch_size):
        pred_score.append(
            [torch.FloatTensor([finalized[b][n]['score']])
             for n in range(self.opt.n_best)]
        )
    return pred_batch, pred_score, pred_length, gold_score, gold_words, allgold_words | 25,704 | 41.557947 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/inference/fast_translator.py | import sys
import onmt
import onmt.modules
import torch
import math
from onmt.model_factory import build_model, optimize_model
from onmt.inference.search import BeamSearch, Sampling
from onmt.inference.translator import Translator
from onmt.constants import add_tokenidx
from options import backward_compatible
# buggy lines: 392, 442, 384
# architectures whose decoder buffers are resized via renew_buffer() before
# long decoding (checked against model_opt.model when loading sub-models)
model_list = ['transformer', 'stochastic_transformer', 'fusion_network']
class FastTranslator(Translator):
"""
A fast implementation of the Beam Search based translator
Based on Fairseq implementation
"""
def __init__(self, opt):
    """Build the fast beam-search translator from the parsed options.

    Sets up special-token ids, the search strategy (beam or sampling),
    an optional target-vocabulary filter, optional sub-models for
    ensembling, optional pretrained classifiers, ensemble weights and an
    optional external (HuggingFace) tokenizer.

    :param opt: parsed option namespace (see the project's options module)
    """
    super().__init__(opt)
    # special-token ids on the source side (set up by onmt.constants)
    self.src_bos = onmt.constants.SRC_BOS
    self.src_eos = onmt.constants.SRC_EOS
    self.src_pad = onmt.constants.SRC_PAD
    self.src_unk = onmt.constants.SRC_UNK
    # target BOS is whatever id the parent class selected (may be a language token)
    self.tgt_bos = self.bos_id
    self.tgt_pad = onmt.constants.TGT_PAD
    self.tgt_eos = onmt.constants.TGT_EOS
    self.tgt_unk = onmt.constants.TGT_UNK
    # decoding strategy: ancestral sampling or standard beam search
    if opt.sampling:
        self.search = Sampling(self.tgt_dict)
    else:
        self.search = BeamSearch(self.tgt_dict)
    self.vocab_size = self.tgt_dict.size()
    self.min_len = opt.min_sent_length
    print("min len:", self.min_len)
    # score normalization and length penalty for finalized hypotheses
    self.normalize_scores = opt.normalize
    self.len_penalty = opt.alpha
    self.buffering = not opt.no_buffering
    # options that may be missing from older option sets: fall back to defaults
    if hasattr(opt, 'no_repeat_ngram_size'):
        self.no_repeat_ngram_size = opt.no_repeat_ngram_size
    else:
        self.no_repeat_ngram_size = 0
    if hasattr(opt, 'dynamic_max_len'):
        self.dynamic_max_len = opt.dynamic_max_len
    else:
        self.dynamic_max_len = False
    if hasattr(opt, 'dynamic_max_len_scale'):
        self.dynamic_max_len_scale = opt.dynamic_max_len_scale
    else:
        self.dynamic_max_len_scale = 1.2
    if opt.verbose:
        # print('* Current bos id is: %d, default bos id is: %d' % (self.tgt_bos, onmt.constants.BOS))
        print("src bos id is %d; src eos id is %d; src pad id is %d; src unk id is %d"
              % (self.src_bos, self.src_eos, self.src_pad, self.src_unk))
        print("tgt bos id is %d; tgt eos id is %d; tgt_pad id is %d; tgt unk id is %d"
              % (self.tgt_bos, self.tgt_eos, self.tgt_pad, self.tgt_unk))
        print('* Using fast beam search implementation')
    # optional output-vocabulary restriction: only listed words may be generated;
    # self.filter is a boolean mask over the target vocabulary
    if opt.vocab_list:
        print("[INFO] reading the list of words from %s" % opt.vocab_list)
        word_list = list()
        for line in open(opt.vocab_list).readlines():
            word = line.strip()
            word_list.append(word)
        self.filter = torch.Tensor(self.tgt_dict.size()).zero_()
        # the eos and unk have to be in here
        for word_idx in [self.tgt_eos, self.tgt_unk]:
            self.filter[word_idx] = 1
        for word in word_list:
            idx = self.tgt_dict.lookup(word)
            if idx is not None:
                self.filter[idx] = 1
            else:
                print("WARNING: word %s does not exist in the dictionary" % word)
        self.filter = self.filter.bool()
        if opt.cuda:
            self.filter = self.filter.cuda()
        self.use_filter = True
    elif opt.vocab_id_list:
        # same restriction, but the allowed word ids are loaded directly
        ids = torch.load(opt.vocab_id_list)
        print('[INFO] Loaded word list with %d ids' % len(ids))
        self.filter = torch.Tensor(self.tgt_dict.size()).zero_()
        for id in ids:
            self.filter[id] = 1
        self.filter = self.filter.bool()
        if opt.cuda:
            self.filter = self.filter.cuda()
        self.use_filter = True
    else:
        self.use_filter = False
    # Sub-model is used for ensembling Speech and Text models
    if opt.sub_model:
        self.sub_models = list()
        self.sub_model_types = list()
        # models are string with | as delimiter
        sub_models = opt.sub_model.split("|")
        print("Loading sub models ... ")
        self.n_sub_models = len(sub_models)
        self.sub_type = 'text'
        for i, model_path in enumerate(sub_models):
            # load each checkpoint on CPU first
            checkpoint = torch.load(model_path,
                                    map_location=lambda storage, loc: storage)
            model_opt = checkpoint['opt']
            model_opt = backward_compatible(model_opt)
            # NOTE(review): flags are forced True whenever the attribute exists -- confirm intended
            if hasattr(model_opt, "enc_not_load_state"):
                model_opt.enc_not_load_state = True
                model_opt.dec_not_load_state = True
            dicts = checkpoint['dicts']
            # update special tokens
            onmt.constants = add_tokenidx(model_opt, onmt.constants, dicts)
            # self.bos_token = model_opt.tgt_bos_word
            """"BE CAREFUL: the sub-models might mismatch with the main models in terms of language dict"""
            """"REQUIRE RE-matching"""
            # the first sub-model's source dictionary overrides the main one
            if i == 0:
                if "src" in checkpoint['dicts']:
                    self.src_dict = checkpoint['dicts']['src']
            if opt.verbose:
                print('Loading sub-model from %s' % model_path)
            model = build_model (model_opt, checkpoint['dicts'], remove_pretrain=True)
            optimize_model(model)
            model.load_state_dict(checkpoint['model'])
            # resize decoder buffers so long sentences can be decoded
            if model_opt.model in model_list:
                # if model.decoder.positional_encoder.len_max < self.opt.max_sent_length:
                # print("Not enough len to decode. Renewing .. ")
                # model.decoder.renew_buffer(self.opt.max_sent_length)
                model.renew_buffer(self.opt.max_sent_length)
            if opt.fp16:
                model = model.half()
            if opt.cuda:
                model = model.cuda()
            else:
                model = model.cpu()
            # optional dynamic int8 quantization for CPU inference
            if opt.dynamic_quantile == 1:
                engines = torch.backends.quantized.supported_engines
                if 'fbgemm' in engines:
                    torch.backends.quantized.engine = 'fbgemm'
                else:
                    print(
                        "[INFO] fbgemm is not found in the available engines. "
                        " Possibly the CPU does not support AVX2."
                        " It is recommended to disable Quantization (set to 0).")
                    torch.backends.quantized.engine = 'qnnpack'
                model = torch.quantization.quantize_dynamic(
                    model, {torch.nn.LSTM, torch.nn.Linear}, dtype=torch.qint8
                )
            model.eval()
            self.sub_models.append(model)
            self.sub_model_types.append(model_opt.model)
    else:
        self.n_sub_models = 0
        self.sub_models = []
    # optional per-model interpolation weights (main models first, then sub-models),
    # normalized so they sum to one
    if opt.ensemble_weight:
        ensemble_weight = [float(item) for item in opt.ensemble_weight.split("|")]
        assert len(ensemble_weight) == self.n_models
        if opt.sub_ensemble_weight:
            sub_ensemble_weight = [float(item) for item in opt.sub_ensemble_weight.split("|")]
            assert len(sub_ensemble_weight) == self.n_sub_models
            ensemble_weight = ensemble_weight + sub_ensemble_weight
        total = sum(ensemble_weight)
        self.ensemble_weight = [ item / total for item in ensemble_weight]
    else:
        self.ensemble_weight = None
    # Pretrained Classifier is used for combining classifier and speech models
    if opt.pretrained_classifier:
        self.pretrained_clfs = list()
        # models are string with | as delimiter
        clfs_models = opt.pretrained_classifier.split("|")
        self.n_clfs = len(clfs_models)
        for i, model_path in enumerate(clfs_models):
            checkpoint = torch.load(model_path,
                                    map_location=lambda storage, loc: storage)
            model_opt = checkpoint['opt']
            model_opt = backward_compatible(model_opt)
            clf_dicts = checkpoint['dicts']
            if opt.verbose:
                print('Loading pretrained classifier from %s' % model_path)
            from onmt.model_factory import build_classifier
            model = build_classifier(model_opt, clf_dicts)
            optimize_model(model)
            model.load_state_dict(checkpoint['model'])
            if opt.fp16:
                model = model.half()
            if opt.cuda:
                model = model.cuda()
            else:
                model = model.cpu()
            # same dynamic-quantization path as for the sub-models above
            if opt.dynamic_quantile == 1:
                engines = torch.backends.quantized.supported_engines
                if 'fbgemm' in engines:
                    torch.backends.quantized.engine = 'fbgemm'
                else:
                    print(
                        "[INFO] fbgemm is not found in the available engines. "
                        " Possibly the CPU does not support AVX2."
                        " It is recommended to disable Quantization (set to 0).")
                    torch.backends.quantized.engine = 'qnnpack'
                model = torch.quantization.quantize_dynamic(
                    model, {torch.nn.LSTM, torch.nn.Linear}, dtype=torch.qint8
                )
            model.eval()
            self.pretrained_clfs.append(model)
    else:
        self.n_clfs = 0
        self.pretrained_clfs = list()
    # external (HuggingFace) tokenizers for the source/target languages;
    # both are None when no external tokenizer is requested
    if "mbart-large-50" in opt.external_tokenizer.lower():
        print("[INFO] Using the external MBART50 tokenizer...")
        from transformers import MBart50TokenizerFast
        # fall back to "en_XX" when the requested language code is unknown
        try:
            self.external_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50",
                                                                           src_lang=opt.src_lang)
        except KeyError as e:
            self.external_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50",
                                                                           src_lang="en_XX")
        try:
            self.tgt_external_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50",
                                                                               src_lang=opt.tgt_lang)
        except KeyError as e:
            self.tgt_external_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50",
                                                                               src_lang="en_XX")
    elif "m2m100" in opt.external_tokenizer.lower():
        print("[INFO] Using the external %s tokenizer..." % opt.external_tokenizer)
        from transformers import M2M100Tokenizer
        self.external_tokenizer = M2M100Tokenizer.from_pretrained(opt.external_tokenizer, src_lang=opt.src_lang)
        self.tgt_external_tokenizer = M2M100Tokenizer.from_pretrained(opt.external_tokenizer, src_lang=opt.tgt_lang)
    elif "deltalm" in opt.external_tokenizer.lower():
        # print("[INFO] Using the external %s tokenizer..." % opt.external_tokenizer)
        # from pretrain_module.tokenization_deltalm import DeltaLMTokenizer
        # self.external_tokenizer = DeltaLMTokenizer.from_pretrained("facebook/mbart-large-50", src_lang=opt.src_lang)
        # self.tgt_external_tokenizer = DeltaLMTokenizer.from_pretrained("facebook/mbart-large-50", src_lang=opt.tgt_lang)
        print("[INFO] Using the external %s tokenizer..." % opt.external_tokenizer)
        # sorted language list keeps id assignment deterministic across runs
        lang_list = sorted(list(self.lang_dict.keys()))
        from pretrain_module.tokenization_deltalm import MultilingualDeltaLMTokenizer
        self.external_tokenizer = MultilingualDeltaLMTokenizer.from_pretrained("facebook/mbart-large-50",
                                                                               lang_list=lang_list,
                                                                               src_lang=opt.src_lang)
        self.tgt_external_tokenizer = MultilingualDeltaLMTokenizer.from_pretrained("facebook/mbart-large-50",
                                                                                   lang_list=lang_list,
                                                                                   src_lang=opt.tgt_lang)
    else:
        self.external_tokenizer = None
        self.tgt_external_tokenizer = None
def change_language(self, new_src_lang=None, new_tgt_lang=None, use_srclang_as_bos=True):
    """Switch the translator to a new language pair at runtime.

    Updates the stored language tokens, re-selects the BOS token/id that
    steers decoding, and (if present) re-targets the external tokenizers.

    :param new_src_lang: new source-language token; kept unchanged if None
    :param new_tgt_lang: new target-language token; kept unchanged if None
    :param use_srclang_as_bos: if True the source language token becomes the
        decoder BOS, otherwise the target language token is used
    """
    if new_src_lang is not None:
        self.src_lang = new_src_lang
    if new_tgt_lang is not None:
        self.tgt_lang = new_tgt_lang
    # the BOS token tells the decoder which language to produce
    if use_srclang_as_bos:
        self.bos_token = self.src_lang
    else:
        self.bos_token = self.tgt_lang
    self.bos_id = self.tgt_dict.labelToIdx[self.bos_token]
    print("[INFO] New Bos Token: %s Bos_ID: %d" % (self.bos_token, self.bos_id))
    self.tgt_bos = self.bos_id
    # BUGFIX: __init__ leaves both external tokenizers as None when no external
    # tokenizer is configured; the unguarded attribute assignment previously
    # raised AttributeError in that case.
    if self.external_tokenizer is not None:
        self.external_tokenizer.src_lang = self.src_lang
    if self.tgt_external_tokenizer is not None:
        self.tgt_external_tokenizer.src_lang = self.tgt_lang
def translate_batch(self, batches, sub_batches=None, prefix_tokens=None, anti_prefix=None):
    """Beam-search a batch under ``torch.no_grad()`` (inference only).

    Thin wrapper around :meth:`_translate_batch`; all arguments are
    forwarded unchanged.
    """
    with torch.no_grad():
        result = self._translate_batch(batches,
                                       sub_batches=sub_batches,
                                       prefix_tokens=prefix_tokens,
                                       anti_prefix=anti_prefix)
    return result
def _translate_batch(self, batches, sub_batches, prefix_tokens=None, anti_prefix=None):
batch = batches[0]
# Batch size is in different location depending on data.
beam_size = self.opt.beam_size
bsz = batch_size = batch.size
max_len = self.opt.max_sent_length
gold_scores = batch.get('source').data.new(batch_size).float().zero_()
gold_words = 0
allgold_scores = []
if batch.has_target:
# Use the first model to decode (also batches[0])
model_ = self.models[0]
gold_words, gold_scores, allgold_scores = model_.decode(batch)
# (3) Start decoding
# initialize buffers
src = batch.get('source')
scores = src.new(bsz * beam_size, max_len + 1).float().fill_(0)
scores_buf = scores.clone()
tokens = src.new(bsz * beam_size, max_len + 2).long().fill_(self.tgt_pad)
tokens_buf = tokens.clone()
tokens[:, 0].fill_(self.tgt_bos) # first token is
# tokens[:, 1].fill_(self.tgt_bos) # first token is bos
attn, attn_buf = None, None
nonpad_idxs = None
src_tokens = src.transpose(0, 1) # batch x time
src_lengths = (src_tokens.ne(self.src_eos) & src_tokens.ne(self.src_pad)).long().sum(dim=1)
blacklist = src_tokens.new_zeros(bsz, beam_size).eq(-1) # forward and backward-compatible False mask
batchable_prefix = False
if prefix_tokens is not None:
prefix_tokens = prefix_tokens.to(src.device)
if bsz == 1:
batchable_prefix = True
else:
# check if padding is in prefix
pmask = prefix_tokens.eq(self.tgt_pad).long().sum()
if pmask.item() == 0:
batchable_prefix = True
if batchable_prefix:
prefix_tokens = prefix_tokens.repeat(beam_size, 1)
for b in range(bsz * beam_size):
for l in range(min(max_len + 2, prefix_tokens.size(1))):
tokens[b, l].fill_(prefix_tokens[b, l])
# In this case, the scores of the prefix positions should be 0
# list of completed sentences
finalized = [[] for i in range(bsz)]
finished = [False for i in range(bsz)]
num_remaining_sent = bsz
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
cand_offsets = torch.arange(0, cand_size).type_as(tokens)
# helper function for allocating buffers on the fly
buffers = {}
def buffer(name, type_of=tokens): # noqa
if name not in buffers:
buffers[name] = type_of.new()
return buffers[name]
def is_finished(sent, step, unfinalized_scores=None):
"""
Check whether we've finished generation for a given sentence, by
comparing the worst score among finalized hypotheses to the best
possible score among unfinalized hypotheses.
"""
assert len(finalized[sent]) <= beam_size
if len(finalized[sent]) == beam_size:
return True
return False
def finalize_hypos(step, bbsz_idx, eos_scores):
"""
Finalize the given hypotheses at this step, while keeping the total
number of finalized hypotheses per sentence <= beam_size.
Note: the input must be in the desired finalization order, so that
hypotheses that appear earlier in the input are preferred to those
that appear later.
Args:
step: current time step
bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
indicating which hypotheses to finalize
eos_scores: A vector of the same size as bbsz_idx containing
scores for each hypothesis
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors
tokens_clone = tokens.index_select(0, bbsz_idx)
tokens_clone = tokens_clone[:, 1:step + 2] # skip the first index, which is EOS
assert not tokens_clone.eq(self.tgt_eos).any()
tokens_clone[:, step] = self.tgt_eos
attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step + 2] if attn is not None else None
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, :step + 1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
# normalize sentence-level scores
if self.normalize_scores:
eos_scores /= (step + 1) ** self.len_penalty
cum_unfin = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
sents_seen = set()
for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), eos_scores.tolist())):
unfin_idx = idx // beam_size
sent = unfin_idx + cum_unfin[unfin_idx]
sents_seen.add((sent, unfin_idx))
# if self.match_source_len and step > src_lengths[unfin_idx]:
# score = -math.inf
def get_hypo():
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i]
else:
hypo_attn = None
return {
'tokens': tokens_clone[i],
'score': score,
'attention': hypo_attn, # src_len x tgt_len
'alignment': None,
'positional_scores': pos_scores[i],
}
if len(finalized[sent]) < beam_size:
finalized[sent].append(get_hypo())
newly_finished = []
for sent, unfin_idx in sents_seen:
# check termination conditions for this sentence
if not finished[sent] and is_finished(sent, step, unfin_idx):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
reorder_state = None
batch_idxs = None
# initialize the decoder state, including:
# - expanding the context over the batch dimension len_src x (B*beam) x H
# - expanding the mask over the batch dimension (B*beam) x len_src
decoder_states = dict()
sub_decoder_states = dict() # for sub-model
for i in range(self.n_models):
# if self.opt.pretrained_classifier:
# pretrained_layer_states = self.pretrained_clfs[i].encode(batches[i])
# else:
# pretrained_layer_states = None
pretrained_clf = self.pretrained_clfs[i] if self.opt.pretrained_classifier else None
decoder_states[i] = self.models[i].create_decoder_state(batches[i], beam_size, type=2,
buffering=self.buffering,
pretrained_classifier=pretrained_clf)
if self.opt.sub_model:
for i in range(self.n_sub_models):
sub_decoder_states[i] = self.sub_models[i].create_decoder_state(sub_batches[i], beam_size, type=2,
buffering=self.buffering)
if self.dynamic_max_len:
src_len = src.size(0)
max_len = math.ceil(int(src_len) * self.dynamic_max_len_scale)
# Start decoding
if prefix_tokens is not None:
if batchable_prefix:
# for this case we run the whole prefix as a preparation step,
# decoding starts from the last of the prefix
step = prefix_tokens.size(1) - 1
else:
# in this case we run decoding as usual but filter the output words for prefix
step = 0
else:
step = 0
# step = 0 if (prefix_tokens is None and bsz == 1) else prefix_tokens.size(1) - 1
# for step in range(max_len + 1): # one extra step for EOS marker
while step < (max_len + 1):
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs)
reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)
for i, model in enumerate(self.models):
decoder_states[i]._reorder_incremental_state(reorder_state)
for i, model in enumerate(self.sub_models):
sub_decoder_states[i]._reorder_incremental_state(reorder_state)
decode_input = tokens[:, :step + 1]
# print(batches[0].get('source'))
# print(decode_input)
lprobs, avg_attn_scores = self._decode(decode_input, decoder_states,
sub_decoder_states=sub_decoder_states)
avg_attn_scores = None
lprobs = lprobs.contiguous()
if self.use_filter:
# the marked words are 1, so fill the reverse to inf
lprobs.masked_fill_(~self.filter.unsqueeze(0), -math.inf)
lprobs[:, self.tgt_pad] = -math.inf # never select pad
# handle min and max length constraints
if step >= max_len:
lprobs[:, :self.tgt_eos] = -math.inf
lprobs[:, self.tgt_eos + 1:] = -math.inf
elif step < self.min_len:
lprobs[:, self.tgt_eos] = -math.inf
# handle prefix tokens (possibly with different lengths)
# here prefix tokens is a list of word-ids
if prefix_tokens is not None and not batchable_prefix:
if step < prefix_tokens.size(1) and step < max_len:
prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
prefix_mask = prefix_toks.ne(self.tgt_pad)
# originally infinity here, this number can return nan so thats quite dangerous
# put a large negative number here is better
lprobs[prefix_mask] = torch.tensor(-21111993).to(lprobs)
lprobs[prefix_mask] = lprobs[prefix_mask].scatter(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask]
)
# lprobs[prefix_mask].scatter_()
# if prefix includes eos, then we should make sure tokens and
# scores are the same across all beams
eos_mask = prefix_toks.eq(self.tgt_eos)
if eos_mask.any():
# validate that the first beam matches the prefix
first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[:, 0, 1:step + 1]
eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
def replicate_first_beam(tensor, mask):
tensor = tensor.view(-1, beam_size, tensor.size(-1))
tensor[mask] = tensor[mask][:, :1, :]
return tensor.view(-1, tensor.size(-1))
# copy tokens, scores and lprobs from the first beam to all beams
tokens = replicate_first_beam(tokens, eos_mask_batch_dim)
scores = replicate_first_beam(scores, eos_mask_batch_dim)
lprobs = replicate_first_beam(lprobs, eos_mask_batch_dim)
else:
# force tgt_eos to not appear
lprobs[:, self.tgt_eos] = -math.inf
if anti_prefix is not None:
# check the step closest to the end of anti prefix
if step == len(anti_prefix) - 1:
_anti_prefix = anti_prefix[step]
for i in range(tokens.size(0)):
decoded_ = tokens[i][1:step+1]
if decoded_.tolist() == anti_prefix[:-1]:
lprobs[i, _anti_prefix] = -math.inf
if self.no_repeat_ngram_size > 0:
# for each beam and batch sentence, generate a list of previous ngrams
gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
gen_tokens = tokens[bbsz_idx].tolist()
for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]):
gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \
gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]]
# Record attention scores
if avg_attn_scores is not None:
if attn is None:
attn = scores.new(bsz * beam_size, src_tokens.size(1), max_len + 2)
attn_buf = attn.clone()
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
scores_buf = scores_buf.type_as(lprobs)
eos_bbsz_idx = buffer('eos_bbsz_idx')
eos_scores = buffer('eos_scores', type_of=scores)
if self.no_repeat_ngram_size > 0:
def calculate_banned_tokens(bbsz_idx):
# before decoding the next token, prevent decoding of ngrams that have already appeared
ngram_index = tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist())
return gen_ngrams[bbsz_idx].get(ngram_index, [])
if step + 2 - self.no_repeat_ngram_size >= 0:
# no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)]
else:
banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos (except for blacklisted ones)
eos_mask = cand_indices.eq(self.tgt_eos)
eos_mask[:, :beam_size][blacklist] = 0
# only consider eos when it's among the top beam_size indices
torch.masked_select(
cand_bbsz_idx[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_bbsz_idx.resize_(0),
)
finalized_sents = set()
if eos_bbsz_idx.numel() > 0:
torch.masked_select(
cand_scores[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_scores.resize_(0),
)
finalized_sents = finalize_hypos(step, eos_bbsz_idx, eos_scores)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
# assert step < max_len
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = cand_indices.new_ones(bsz)
batch_mask[cand_indices.new(finalized_sents)] = 0
batch_idxs = batch_mask.nonzero(as_tuple=False).squeeze(-1)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None and not batchable_prefix:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
blacklist = blacklist[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
scores_buf.resize_as_(scores)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens_buf.resize_as_(tokens)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1)
attn_buf.resize_as_(attn)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos or
# blacklisted hypos and values < cand_size indicate candidate
# active hypos. After this, the min values per row are the top
# candidate active hypos.
active_mask = buffer('active_mask')
eos_mask[:, :beam_size] |= blacklist
torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[:eos_mask.size(1)],
out=active_mask.resize_(0),
)
# get the top beam_size active hypotheses, which are just the hypos
# with the smallest values in active_mask
active_hypos, new_blacklist = buffer('active_hypos'), buffer('new_blacklist')
torch.topk(
active_mask, k=beam_size, dim=1, largest=False,
out=(new_blacklist.resize_(0), active_hypos.resize_(0))
)
# update blacklist to ignore any finalized hypos
blacklist = new_blacklist.ge(cand_size)[:, :beam_size]
assert (~blacklist).any(dim=1).all()
active_bbsz_idx = buffer('active_bbsz_idx')
torch.gather(
cand_bbsz_idx, dim=1, index=active_hypos,
out=active_bbsz_idx.resize_(0),
)
active_scores = torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores[:, step].view(bsz, beam_size),
)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
torch.index_select(
tokens[:, :step + 1], dim=0, index=active_bbsz_idx,
out=tokens_buf[:, :step + 1],
)
torch.gather(
cand_indices, dim=1, index=active_hypos,
out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1],
)
if step > 0:
torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx,
out=scores_buf[:, :step],
)
torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores_buf.view(bsz, beam_size, -1)[:, :, step],
)
# copy attention for active hypotheses
if attn is not None:
torch.index_select(
attn[:, :, :step + 2], dim=0, index=active_bbsz_idx,
out=attn_buf[:, :, :step + 2],
)
# swap buffers
tokens, tokens_buf = tokens_buf, tokens
scores, scores_buf = scores_buf, scores
if attn is not None:
attn, attn_buf = attn_buf, attn
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
step = step + 1
# sort by score descending
for sent in range(len(finalized)):
finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)
return finalized, gold_scores, gold_words, allgold_scores
def _decode(self, tokens, decoder_states, sub_decoder_states=None):
# require batch first for everything
outs = dict()
attns = dict()
for i in range(self.n_models):
# decoder output contains the log-prob distribution of the next step
decoder_output = self.models[i].step(tokens, decoder_states[i])
outs[i] = decoder_output['log_prob']
attns[i] = decoder_output['coverage']
for j in range(self.n_sub_models):
sub_decoder_output = self.sub_models[j].step(tokens, sub_decoder_states[j])
outs[self.n_models + j] = sub_decoder_output['log_prob']
out = self._combine_outputs(outs, weight=self.ensemble_weight)
# attn = self._combine_attention(attns)
if self.vocab_size > out.size(-1):
self.vocab_size = out.size(-1) # what the hell ?
# attn = attn[:, -1, :] # I dont know what this line does
attn = None # attn is never used in decoding probably
return out, attn
def build_prefix(self, prefixes, bsz=None):
"""
:param bsz:
:param prefixes: List of strings
:return:
"""
if self.external_tokenizer is None:
prefix_data = [self.tgt_dict.convertToIdx(sent.split(),
onmt.constants.UNK_WORD)
for sent in prefixes]
else:
# move the last element which is <eos>
if self.opt.force_bos:
_prefix_data = [torch.LongTensor([self.bos_id] + self.external_tokenizer(sent)['input_ids'][:-1])
for sent in prefixes]
else:
_prefix_data = [torch.LongTensor(self.external_tokenizer(sent)['input_ids'][:-1])
for sent in prefixes]
prefix_data = _prefix_data
#
new_prefix_data = []
#
for prefix_tensor in prefix_data:
if "MultilingualDeltaLM" in self.external_tokenizer.__class__.__name__:
pass
else:
prefix_tensor[0] = self.bos_id
new_prefix_data.append(prefix_tensor)
#
prefix_data = new_prefix_data
# _listed_tensor = prefix_tensor.tolist()
# if _listed_tensor[0] == self.tgt_bos:
# _listed_tensor = _listed_tensor[1:]
# if _listed_tensor[0] == self.tgt_eos:
# _listed_tensor = _listed_tensor[:-1]
# prefix_data.append(torch.LongTensor(_listed_tensor))
# clone the same prefix for multiple sentences
if len(prefix_data) == 1 and bsz > 1:
prefix_data = prefix_data * bsz
# collate into the same tensor with padding
lengths = [x.size(0) for x in prefix_data]
max_length = max(lengths)
tensor = prefix_data[0].new(len(prefix_data), max_length).fill_(self.tgt_pad)
for i in range(len(prefix_data)):
data_length = prefix_data[i].size(0)
offset = 0
tensor[i].narrow(0, offset, data_length).copy_(prefix_data[i])
return tensor
def build_anti_prefix(self, anti_prefix):
"""
:param bsz:
:param prefixes: List of strings
:return:
"""
if self.external_tokenizer is None:
anti_prefix = self.tgt_dict.convertToIdx(anti_prefix.split(),
onmt.constants.UNK_WORD)
else:
# move the last element which is <eos>
# if self.opt.force_bos:
# _prefix_data = [torch.LongTensor([self.bos_id] + self.external_tokenizer(sent)['input_ids'][:-1])
# for sent in prefixes]
# else:
# _prefix_data = [torch.LongTensor(self.external_tokenizer(sent)['input_ids'][:-1])
# for sent in prefixes]
_anti_prefix_data = self.external_tokenizer(anti_prefix)['input_ids'][:-1]
_anti_prefix_data = _anti_prefix_data[1:]
anti_prefix = torch.LongTensor(_anti_prefix_data)
anti_prefix = anti_prefix.tolist()
return anti_prefix
# override the "build_data" from parent Translator
def build_data(self, src_sents, tgt_sents, type='mt', past_sents=None):
# This needs to be the same as preprocess.py.
data_type = 'text'
if type == 'mt':
if self.external_tokenizer is None:
# TODO: add external tokenizer
if self.start_with_bos:
src_data = [self.src_dict.convertToIdx(b,
onmt.constants.UNK_WORD,
onmt.constants.BOS_WORD)
for b in src_sents]
else:
src_data = [self.src_dict.convertToIdx(b,
onmt.constants.UNK_WORD)
for b in src_sents]
if past_sents is not None:
if self.start_with_bos:
past_src_data = [self.src_dict.convertToIdx(b,
onmt.constants.UNK_WORD,
onmt.constants.BOS_WORD)
for b in past_sents]
else:
past_src_data = [self.src_dict.convertToIdx(b,
onmt.constants.UNK_WORD)
for b in past_sents]
else:
past_src_data = None
else:
src_data = [torch.LongTensor(self.external_tokenizer(" ".join(b))['input_ids'])
for b in src_sents]
if past_sents is not None:
past_src_data = [torch.LongTensor(self.external_tokenizer(" ".join(b))['input_ids'])
for b in past_src_data]
else:
past_src_data = None
elif type == 'asr':
# no need to deal with this
src_data = src_sents
past_src_data = past_sents
data_type = 'audio'
elif type == 'asr_wav':
src_data = src_sents
past_src_data = past_sents
data_type = 'wav'
else:
raise NotImplementedError
tgt_bos_word = self.opt.bos_token
if self.opt.no_bos_gold:
tgt_bos_word = None
tgt_data = None
if tgt_sents:
if self.tgt_external_tokenizer is not None:
tgt_data = [torch.LongTensor(self.tgt_external_tokenizer(" ".join(b))['input_ids'])
for b in tgt_sents]
else:
tgt_data = [self.tgt_dict.convertToIdx(b,
onmt.constants.UNK_WORD,
tgt_bos_word,
onmt.constants.EOS_WORD) for b in tgt_sents]
if self.src_lang in self.lang_dict:
src_lang_data = [torch.Tensor([self.lang_dict[self.src_lang]])]
else:
src_lang_data = [torch.Tensor([0])]
if self.tgt_lang in self.lang_dict:
tgt_lang_data = [torch.Tensor([self.lang_dict[self.tgt_lang]])]
else:
tgt_lang_data = [torch.Tensor([0])]
try:
src_atb = self.opt.src_atb
if src_atb in self.atb_dict:
src_atb_data = [torch.Tensor([self.atb_dict[src_atb]])]
else:
src_atb_data = None
except AttributeError:
src_atb_data = None
try:
tgt_atb = self.opt.tgt_atb
if tgt_atb in self.atb_dict:
tgt_atb_data = [torch.Tensor([self.atb_dict[tgt_atb]])]
else:
tgt_atb_data = None
except AttributeError:
tgt_atb_data = None
return onmt.Dataset(src_data, tgt_data,
src_langs=src_lang_data, tgt_langs=tgt_lang_data,
src_atbs=src_atb_data, tgt_atbs=tgt_atb_data,
batch_size_words=sys.maxsize,
batch_size_frames=sys.maxsize,
cut_off_size=sys.maxsize,
smallest_batch_size=sys.maxsize,
max_src_len=sys.maxsize,
data_type=data_type,
batch_size_sents=sys.maxsize,
src_align_right=self.opt.src_align_right,
past_src_data=past_src_data)
    def translate(self, src_data, tgt_data, past_src_data=None, sub_src_data=None, type='mt',
                  prefix=None, anti_prefix=None):
        """Translate raw inputs end-to-end with the ensemble.

        Pipeline: (1) index/collate inputs into per-model batches,
        (2) run ensemble beam search (translate_batch), (3) convert the
        n-best id sequences back to tokens.

        :param src_data: source inputs; for 'asr'/'asr_wav' a per-model list
            of feature batches is accepted (see the isinstance checks below)
        :param tgt_data: optional gold targets (used for gold scoring)
        :param past_src_data: optional preceding-context source inputs
        :param sub_src_data: optional text inputs for the sub (text) models
        :param type: 'mt', 'asr' or 'asr_wav'
        :param prefix: optional list of prefix strings forced on the decoder
        :param anti_prefix: optional string whose last token is forbidden
        :return: (pred_batch, pred_ids, pred_score, pred_length,
                  gold_score, gold_words, allgold_words)
        """
        # normalize "no past context" to None
        if past_src_data is None or len(past_src_data) == 0:
            past_src_data = None
        # (1) convert words to indexes
        if isinstance(src_data[0], list) and type in ['asr', 'asr_wav']:
            # one batch per ensemble model (each model may use different features)
            batches = list()
            for i, src_data_ in enumerate(src_data):
                if past_src_data is not None:
                    past_src_data_ = past_src_data[i]
                else:
                    past_src_data_ = None
                dataset = self.build_data(src_data_, tgt_data, type=type, past_sents=past_src_data_)
                batch = dataset.get_batch(0)
                batches.append(batch)
        elif isinstance(src_data[0], list) and isinstance(src_data[0][0], list):
            # nested list: unwrap, then share the single batch across all models
            src_data = src_data[0]
            dataset = self.build_data(src_data, tgt_data, type=type, past_sents=past_src_data)
            batch = dataset.get_batch(0)  # this dataset has only one mini-batch
            batches = [batch] * self.n_models
            src_data = [src_data] * self.n_models
        else:
            dataset = self.build_data(src_data, tgt_data, type=type, past_sents=past_src_data)
            batch = dataset.get_batch(0)  # this dataset has only one mini-batch
            batches = [batch] * self.n_models
            src_data = [src_data] * self.n_models
        # sub models always consume text ('mt') input
        if sub_src_data is not None and len(sub_src_data) > 0:
            sub_dataset = self.build_data(sub_src_data, tgt_data, type='mt')
            sub_batch = sub_dataset.get_batch(0)
            sub_batches = [sub_batch] * self.n_sub_models
            sub_src_data = [sub_src_data] * self.n_sub_models
        else:
            sub_batches, sub_src_data = None, None
        batch_size = batches[0].size
        if self.cuda:
            for i, _ in enumerate(batches):
                batches[i].cuda(fp16=self.fp16)
            if sub_batches:
                for i, _ in enumerate(sub_batches):
                    sub_batches[i].cuda(fp16=self.fp16)
        if prefix is not None:
            prefix_tensor = self.build_prefix(prefix, bsz=batch_size)
        else:
            prefix_tensor = None
        if anti_prefix is not None:
            anti_prefix = self.build_anti_prefix(anti_prefix)
            print("ANTI PREFIX:", anti_prefix)
        # (2) translate
        # each model in the ensemble uses one batch in batches
        finalized, gold_score, gold_words, allgold_words = self.translate_batch(batches, sub_batches=sub_batches,
                                                                                prefix_tokens=prefix_tensor,
                                                                                anti_prefix=anti_prefix)
        pred_length = []
        # (3) convert indexes to words
        pred_batch = []
        pred_ids = []
        # all entries of src_data are identical copies at this point; keep one
        src_data = src_data[0]
        for b in range(batch_size):
            # probably when the src is empty so beam search stops immediately
            if len(finalized[b]) == 0:
                # assert len(src_data[b]) == 0, "The target search result is empty, assuming that the source is empty."
                pred_batch.append(
                    [self.build_target_tokens([], src_data[b], None)
                     for n in range(self.opt.n_best)]
                )
                pred_ids.append([[] for n in range(self.opt.n_best)])
            else:
                pred_batch.append(
                    [self.build_target_tokens(finalized[b][n]['tokens'], src_data[b], None)
                     for n in range(self.opt.n_best)]
                )
                pred_ids.append([finalized[b][n]['tokens'] for n in range(self.opt.n_best)])
        pred_score = []
        for b in range(batch_size):
            if len(finalized[b]) == 0:
                # no hypothesis -> score 0 placeholder per n-best slot
                pred_score.append(
                    [torch.FloatTensor([0])
                     for n in range(self.opt.n_best)]
                )
            else:
                pred_score.append(
                    [torch.FloatTensor([finalized[b][n]['score']])
                     for n in range(self.opt.n_best)]
                )
        return pred_batch, pred_ids, pred_score, pred_length, gold_score, gold_words, allgold_words
| 48,877 | 42.641071 | 126 | py |
NMTGMinor | NMTGMinor-master/onmt/inference/search.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
import onmt
class Search(object):
    """Base class for beam-search strategies.

    Holds the vocabulary size (used by subclasses to map flattened candidate
    indices back to (beam, word) pairs) and lazily-allocated output buffers.
    """

    def __init__(self, tgt_dict):
        self.vocab_size = tgt_dict.size()
        # output buffers, allocated on first use by _init_buffers
        self.scores_buf = None
        self.indices_buf = None
        self.beams_buf = None

    def _init_buffers(self, t):
        """Lazily allocate score/index buffers on the same device as *t*."""
        if self.scores_buf is not None:
            return
        self.scores_buf = t.new()
        self.indices_buf = torch.LongTensor().to(device=t.device)

    def step(self, step, lprobs, scores, beam_size):
        """Take a single search step.

        :param step: current search step, starting at 0
        :param lprobs: (bsz x input_beam_size x vocab_size) log-probabilities
            over the vocabulary at the current step
        :param scores: (bsz x input_beam_size x step) historical model scores
            of each hypothesis up to this point
        :param beam_size: number of hypotheses kept per sentence
        :return: tuple (scores, indices, beams) with shape
            (bsz x output_beam_size) each, where output_beam_size may exceed
            input_beam_size (e.g. 2x to account for EOS); ``beams`` holds
            hypothesis ids in [0, input_beam_size).
        """
        raise NotImplementedError

    def set_src_lengths(self, src_lengths):
        """Record source lengths for strategies that need them."""
        self.src_lengths = src_lengths
class BeamSearch(Search):
    """Standard beam search: keep the 2*beam_size best cumulative hypotheses."""

    def __init__(self, tgt_dict):
        super().__init__(tgt_dict)

    def step(self, step, lprobs, scores, initial_score=None, **kwargs):
        """One beam-search step; see Search.step for the contract.

        NOTE(review): lprobs is modified in place (add_) for step > 0 or when
        initial_score is given -- callers should not reuse it afterwards.
        """
        super()._init_buffers(lprobs)
        # batch size first, then beam size
        bsz, beam_size, vocab_size = lprobs.size()
        if step == 0:
            # at the first step all hypotheses are equally likely, so use
            # only the first beam
            if initial_score is None or torch.sum(initial_score).item() == 0:
                lprobs = lprobs[:, ::beam_size, :].contiguous()
            else:
                lprobs.add_(initial_score.unsqueeze(-1))
                # if we don't do this, the first beam will contain top K of exactly the same thing ...
        else:
            # make probs contain cumulative scores for each hypothesis
            lprobs.add_(scores[:, :, step - 1].unsqueeze(-1))
        # here lprobs should be (bsz, beam_size, V) (in streaming, bsz should be 1)
        torch.topk(
            lprobs.view(bsz, -1),  # after view, it should be (bsz, beam_size x V)
            k=min(
                # Take the best 2 x beam_size predictions. We'll choose the first
                # beam_size of these which don't predict eos to continue with.
                beam_size * 2,
                lprobs.view(bsz, -1).size(1) - beam_size,  # -beam_size so we never select pad (beam_size times)
            ),
            # reuse the persistent buffers; resize_(0) lets topk size them
            out=(self.scores_buf.resize_(0), self.indices_buf.resize_(0)),
        )
        # torch.div(self.indices_buf, vocab_size, out=self.beams_buf)
        # beams_buf holds the originating beam of each candidate; truncation
        # after true_divide equals floor here because indices are non-negative
        self.beams_buf = torch.true_divide(self.indices_buf, vocab_size).long()
        # indices: the word indices in the vocabulary
        self.indices_buf.fmod_(vocab_size)
        return self.scores_buf, self.indices_buf, self.beams_buf
class DiverseBeamSearch(Search):
    """Diverse Beam Search.

    See "Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence
    Models" for details.

    We only implement the Hamming Diversity penalty here, which performed best
    in the original paper.
    """

    def __init__(self, tgt_dict, num_groups, diversity_strength):
        super().__init__(tgt_dict)
        self.num_groups = num_groups
        # stored negated so the penalty is applied with a single fused add
        self.diversity_strength = -diversity_strength
        self.diversity_buf = None
        self.beam = BeamSearch(tgt_dict)

    def step(self, step, lprobs, scores):
        """One diverse-beam-search step; see Search.step for the contract.

        :raises ValueError: if beam_size is not divisible by num_groups
        """
        super()._init_buffers(lprobs)
        bsz, beam_size, vocab_size = lprobs.size()
        if beam_size % self.num_groups != 0:
            raise ValueError(
                'DiverseBeamSearch requires --beam to be divisible by the number of groups'
            )
        # initialize diversity penalty (reset to zero every step)
        if self.diversity_buf is None:
            self.diversity_buf = lprobs.new()
        torch.zeros(lprobs[:, 0, :].size(), out=self.diversity_buf)
        scores_G, indices_G, beams_G = [], [], []
        for g in range(self.num_groups):
            lprobs_g = lprobs[:, g::self.num_groups, :]
            scores_g = scores[:, g::self.num_groups, :] if step > 0 else None
            # apply diversity penalty accumulated from earlier groups.
            # BUG FIX: the removed torch.add(input, value, other) positional
            # overload is replaced by the alpha= keyword form; alpha scales
            # the penalty term exactly as the old positional value did.
            if g > 0:
                lprobs_g = torch.add(lprobs_g, self.diversity_buf.unsqueeze(1),
                                     alpha=self.diversity_strength)
            else:
                lprobs_g = lprobs_g.contiguous()
            scores_buf, indices_buf, beams_buf = self.beam.step(step, lprobs_g, scores_g)
            beams_buf.mul_(self.num_groups).add_(g)
            scores_G.append(scores_buf.clone())
            indices_G.append(indices_buf.clone())
            beams_G.append(beams_buf.clone())
            # update diversity penalty with the tokens this group selected
            self.diversity_buf.scatter_add_(
                1,
                indices_buf,
                self.diversity_buf.new_ones(indices_buf.size())
            )
        # interleave results from different groups
        self.scores_buf = torch.stack(scores_G, dim=2, out=self.scores_buf).view(bsz, -1)
        self.indices_buf = torch.stack(indices_G, dim=2, out=self.indices_buf).view(bsz, -1)
        self.beams_buf = torch.stack(beams_G, dim=2, out=self.beams_buf).view(bsz, -1)
        return self.scores_buf, self.indices_buf, self.beams_buf
class Sampling(Search):
    """Ancestral sampling search with optional top-k / top-p (nucleus) truncation."""
    sampling_topk: int
    sampling_topp: float

    def __init__(self, tgt_dict, sampling_topk=-1, sampling_topp=-1.0):
        super().__init__(tgt_dict)
        # -1 disables the corresponding truncation (see the > 0 checks in step)
        self.sampling_topk = sampling_topk
        self.sampling_topp = sampling_topp

    def _sample_topp(self, lprobs):
        """Sample among the smallest set of elements whose cumulative probability mass exceeds p.

        See `"The Curious Case of Neural Text Degeneration"
        (Holtzman et al., 2019) <https://arxiv.org/abs/1904.09751>`_.

        NOTE(review): lprobs is exponentiated IN PLACE (exp_), so the caller's
        tensor holds probabilities afterwards.

        Args:
            lprobs: (bsz x input_beam_size x vocab_size)
                the model's log-probabilities over the vocabulary at the current step

        Return: A tuple of (trimed_probs, truncated_indices) where:
            trimed_probs: (bsz x input_beam_size x ?)
                the model's probabilities over the elements selected to sample from. The
                width of the third dimension is determined by top-P.
            truncated_indices: (bsz x input_beam_size x ?)
                the indices of the chosen elements.
        """
        probs = lprobs.exp_()
        # sort the last dimension (vocab dimension) in descending order
        sorted_probs, sorted_indices = probs.sort(descending=True)
        # compute a mask to indicate the words to be included in the top-P set.
        cumsum_probs = sorted_probs.cumsum(dim=2)
        mask = cumsum_probs.lt(self.sampling_topp)
        # note that mask was computed by 'lt'. One more word needs to be included
        # so that the cumulative probability mass can exceed p.
        cumsum_mask = mask.cumsum(dim=2)
        last_included = cumsum_mask[:, :, -1:]
        last_included.clamp_(0, mask.size()[2] - 1)
        mask = mask.scatter_(2, last_included, 1)
        # truncate unnecessary dims.
        max_dim = last_included.max()
        truncated_mask = mask[:, :, : max_dim + 1]
        truncated_probs = sorted_probs[:, :, : max_dim + 1]
        truncated_indices = sorted_indices[:, :, : max_dim + 1]
        # trim the words that are not in top-P by setting their probabilities
        # to 0, so that they would not be sampled later.
        trim_mask = ~truncated_mask
        trimed_probs = truncated_probs.masked_fill_(trim_mask, 0)
        return trimed_probs, truncated_indices

    @torch.jit.export
    def step(
        self,
        step: int,
        lprobs,
        scores,
        prev_output_tokens = None,
        original_batch_idxs = None,
    ):
        """One sampling step; see Search.step for the contract.

        NOTE(review): lprobs is consumed in place (exp_) in every branch.
        """
        bsz, beam_size, vocab_size = lprobs.size()
        if step == 0:
            # at the first step all hypotheses are equally likely, so use
            # only the first beam
            lprobs = lprobs[:, ::beam_size, :].contiguous()
        if self.sampling_topp > 0:
            # only sample from the smallest set of words whose cumulative probability mass exceeds p
            probs, top_indices = self._sample_topp(lprobs)
        elif self.sampling_topk > 0:
            # only sample from top-k candidates
            lprobs, top_indices = lprobs.topk(self.sampling_topk)
            probs = lprobs.exp_()
        else:
            probs = lprobs.exp_()
            # dummy data to be consistent with true branch for type check
            top_indices = torch.empty(0).to(probs)
        # sample
        if step == 0:
            # draw beam_size independent continuations per sentence
            indices_buf = torch.multinomial(
                probs.view(bsz, -1),
                beam_size,
                replacement=True,
            ).view(bsz, beam_size)
        else:
            # one continuation per existing hypothesis
            indices_buf = torch.multinomial(
                probs.view(bsz * beam_size, -1),
                1,
                replacement=True,
            ).view(bsz, beam_size)
        if step == 0:
            # expand to beam size
            probs = probs.expand(bsz, beam_size, -1)
        # gather scores
        scores_buf = torch.gather(probs, dim=2, index=indices_buf.unsqueeze(-1))
        scores_buf = scores_buf.log_().view(bsz, -1)
        # remap indices if using top-k or top-P sampling
        if self.sampling_topk > 0 or self.sampling_topp > 0:
            indices_buf = torch.gather(
                top_indices.expand(bsz, beam_size, -1),
                dim=2,
                index=indices_buf.unsqueeze(-1),
            ).squeeze(2)
        if step == 0:
            beams_buf = indices_buf.new_zeros(bsz, beam_size)
        else:
            # each sampled token extends the beam at its own position
            beams_buf = torch.arange(0, beam_size).to(indices_buf).repeat(bsz, 1)
            # make scores cumulative
            scores_buf.add_(
                torch.gather(scores[:, :, step - 1], dim=1, index=beams_buf)
            )
        return scores_buf, indices_buf, beams_buf
| 11,084 | 37.224138 | 112 | py |
NMTGMinor | NMTGMinor-master/onmt/inference/ColdFusionTranslator.py | import onmt
import onmt.modules
import torch.nn as nn
import torch
import math
from onmt.model_factory import build_model, build_fusion, build_language_model
from ae.Autoencoder import Autoencoder
import torch.nn.functional as F
import sys
# Architectures whose decoders keep a positional buffer that must be resized
# (via model.renew_buffer) before decoding sentences up to max_sent_length.
model_list = ['transformer', 'stochastic_transformer', 'fusion_network']
class EnsembleTranslator(object):
    def __init__(self, opt):
        """Load the ensemble (and optional LM / autoencoder) from checkpoints.

        :param opt: parsed command-line options; ``opt.model`` holds one or
            more checkpoint paths joined by '|'.
        """
        self.opt = opt
        self.tt = torch.cuda if opt.cuda else torch
        self.beam_accum = None
        self.beta = opt.beta
        self.alpha = opt.alpha
        self.start_with_bos = opt.start_with_bos
        self.fp16 = opt.fp16
        self.models = list()
        self.model_types = list()
        # models are string with | as delimiter
        models = opt.model.split("|")
        print(models)
        self.n_models = len(models)
        # 'text' unless the first checkpoint has no source dict (audio input)
        self._type = 'text'
        for i, model in enumerate(models):
            if opt.verbose:
                print('Loading model from %s' % model)
            # map_location keeps tensors on CPU regardless of where they were saved
            checkpoint = torch.load(model,
                                    map_location=lambda storage, loc: storage)
            model_opt = checkpoint['opt']
            # dictionaries are taken from the first checkpoint only
            if i == 0:
                if "src" in checkpoint['dicts']:
                    self.src_dict = checkpoint['dicts']['src']
                else:
                    self._type = "audio"
                self.tgt_dict = checkpoint['dicts']['tgt']
            # Build model from the saved option
            # if hasattr(model_opt, 'fusion') and model_opt.fusion == True:
            #     print("* Loading a FUSION model")
            #     model = build_fusion(model_opt, checkpoint['dicts'])
            # else:
            #     model = build_model(model_opt, checkpoint['dicts'])
            model = build_model(model_opt)
            model.load_state_dict(checkpoint['model'])
            if model_opt.model in model_list:
                # if model.decoder.positional_encoder.len_max < self.opt.max_sent_length:
                #     print("Not enough len to decode. Renewing .. ")
                #     model.decoder.renew_buffer(self.opt.max_sent_length)
                # make sure the positional buffer covers the max decode length
                model.renew_buffer(self.opt.max_sent_length)
            if opt.fp16:
                model = model.half()
            if opt.cuda:
                model = model.cuda()
            else:
                model = model.cpu()
            model.eval()
            self.models.append(model)
            self.model_types.append(model_opt.model)
        # language model
        if opt.lm is not None:
            if opt.verbose:
                print('Loading language model from %s' % opt.lm)
            lm_chkpoint = torch.load(opt.lm, map_location=lambda storage, loc: storage)
            lm_opt = lm_chkpoint['opt']
            lm_model = build_language_model(lm_opt, lm_chkpoint['dicts'])
            if opt.fp16:
                lm_model = lm_model.half()
            if opt.cuda:
                lm_model = lm_model.cuda()
            else:
                lm_model = lm_model.cpu()
            self.lm_model = lm_model
        self.cuda = opt.cuda
        self.ensemble_op = opt.ensemble_op
        # optional autoencoder wrapping the first model
        if opt.autoencoder is not None :
            if opt.verbose:
                print('Loading autoencoder from %s' % opt.autoencoder)
            checkpoint = torch.load(opt.autoencoder,
                                    map_location=lambda storage, loc: storage)
            model_opt = checkpoint['opt']
            #posSize= checkpoint['autoencoder']['nmt.decoder.positional_encoder.pos_emb'].size(0)
            #self.models[0].decoder.renew_buffer(posSize)
            #self.models[0].decoder.renew_buffer(posSize)
            # Build model from the saved option
            self.autoencoder = Autoencoder(self.models[0],model_opt)
            self.autoencoder.load_state_dict(checkpoint['autoencoder'])
            if opt.cuda:
                self.autoencoder = self.autoencoder.cuda()
                self.models[0] = self.models[0].cuda()
            else:
                self.autoencoder = self.autoencoder.cpu()
                self.models[0] = self.models[0].cpu()
            if opt.fp16:
                self.autoencoder = self.autoencoder.half()
                self.models[0] = self.models[0].half()
        if opt.verbose:
            print('Done')
def init_beam_accum(self):
self.beam_accum = {
"predicted_ids": [],
"beam_parent_ids": [],
"scores": [],
"log_probs": []}
# Combine distributions from different models
def _combine_outputs(self, outputs):
if len(outputs) == 1:
return outputs[0]
if self.ensemble_op == "logSum":
output = (outputs[0])
# sum the log prob
for i in range(1, len(outputs)):
output += (outputs[i])
output.div(len(outputs))
# output = torch.log(output)
output = F.log_softmax(output, dim=-1)
elif self.ensemble_op == "mean":
output = torch.exp(outputs[0])
# sum the log prob
for i in range(1, len(outputs)):
output += torch.exp(outputs[i])
output.div(len(outputs))
# output = torch.log(output)
output = torch.log(output)
elif self.ensemble_op == 'gmean':
output = torch.exp(outputs[0])
# geometric mean of the probabilities
for i in range(1, len(outputs)):
output *= torch.exp(outputs[i])
# have to normalize
output.pow_(1.0 / float(len(outputs)))
norm_ = torch.norm(output, p=1, dim=-1)
output.div_(norm_.unsqueeze(-1))
output = torch.log(output)
else:
raise ValueError('Emsemble operator needs to be "mean" or "logSum", the current value is %s' % self.ensemble_op)
return output
# Take the average of attention scores
def _combine_attention(self, attns):
attn = attns[0]
for i in range(1, len(attns)):
attn += attns[i]
attn.div(len(attns))
return attn
def build_data(self, src_sents, tgt_sents):
# This needs to be the same as preprocess.py.
if self.start_with_bos:
src_data = [self.src_dict.convertToIdx(b,
onmt.constants.UNK_WORD,
onmt.constants.BOS_WORD)
for b in src_sents]
else:
src_data = [self.src_dict.convertToIdx(b,
onmt.constants.UNK_WORD)
for b in src_sents]
tgt_data = None
if tgt_sents:
tgt_data = [self.tgt_dict.convertToIdx(b,
onmt.constants.UNK_WORD,
onmt.constants.BOS_WORD,
onmt.constants.EOS_WORD) for b in tgt_sents]
return onmt.Dataset(src_data, tgt_data, sys.maxsize
, data_type=self._type,
batch_size_sents =self.opt.batch_size)
def build_asr_data(self, src_data, tgt_sents):
# This needs to be the same as preprocess.py.
tgt_data = None
if tgt_sents:
tgt_data = [self.tgt_dict.convertToIdx(b,
onmt.constants.UNK_WORD,
onmt.constants.BOS_WORD,
onmt.constants.EOS_WORD) for b in tgt_sents]
return onmt.Dataset(src_data, tgt_data, sys.maxsize,
data_type=self._type, batch_size_sents =self.opt.batch_size)
def build_target_tokens(self, pred, src, attn):
tokens = self.tgt_dict.convertToLabels(pred, onmt.constants.EOS)
tokens = tokens[:-1] # EOS
return tokens
def translate_batch(self, batch):
    """Beam-search decode one batch with the model ensemble.

    Returns:
        all_hyp: per sentence, the n-best token-id hypotheses
        all_scores: per sentence, the n-best scores
        all_attn: per sentence, attention over non-pad source positions
        all_lengths: per sentence, hypothesis lengths
        gold_scores, gold_words, allgold_scores: gold-target statistics
            (zeros/empty when the batch has no target)
    """
    torch.set_grad_enabled(False)
    # Batch size is in different location depending on data.
    beam_size = self.opt.beam_size
    batch_size = batch.size
    gold_scores = batch.get('source').data.new(batch_size).float().zero_()
    gold_words = 0
    allgold_scores = []
    if batch.has_target:
        # Use the first model to score the gold target
        model_ = self.models[0]
        gold_words, gold_scores, allgold_scores = model_.decode(batch)
    # (3) Start decoding
    # time x batch * beam
    # one beam per sentence in the batch
    beam = [onmt.Beam(beam_size, self.opt.cuda) for k in range(batch_size)]
    batch_idx = list(range(batch_size))
    remaining_sents = batch_size
    # one decoder state per ensembled model
    decoder_states = dict()
    for i in range(self.n_models):
        decoder_states[i] = self.models[i].create_decoder_state(batch, beam_size)
    if self.opt.lm:
        lm_decoder_states = self.lm_model.create_decoder_state(batch, beam_size)
    for i in range(self.opt.max_sent_length):
        # Prepare decoder input.
        # input size: 1 x ( batch * beam )
        input = torch.stack([b.getCurrentState() for b in beam
                             if not b.done]).t().contiguous().view(1, -1)
        decoder_input = input
        # require batch first for everything
        outs = dict()
        attns = dict()
        for k in range(self.n_models):
            decoder_output = self.models[k].step(decoder_input.clone(), decoder_states[k])
            outs[k] = decoder_output['log_prob']
            attns[k] = decoder_output['coverage']
        # batch * beam x vocab_size
        out = self._combine_outputs(outs)
        attn = self._combine_attention(attns)
        if self.opt.lm:
            lm_decoder_output = self.lm_model.step(decoder_input.clone(), lm_decoder_states)
            # shallow fusion with a hard-coded LM weight of 0.3
            out = out + 0.3 * lm_decoder_output
        word_lk = out.view(beam_size, remaining_sents, -1) \
            .transpose(0, 1).contiguous()
        attn = attn.view(beam_size, remaining_sents, -1) \
            .transpose(0, 1).contiguous()
        active = []
        for b in range(batch_size):
            if beam[b].done:
                continue
            idx = batch_idx[b]
            if not beam[b].advance(word_lk.data[idx], attn.data[idx]):
                # beam b is still alive: keep it and realign its decoder state
                active += [b]
                for j in range(self.n_models):
                    decoder_states[j].update_beam(beam, b, remaining_sents, idx)
        if not active:
            break
        # in this section, the sentences that are still active are
        # compacted so that the decoder is not run on completed sentences
        active_idx = self.tt.LongTensor([batch_idx[k] for k in active])
        batch_idx = {beam: idx for idx, beam in enumerate(active)}
        for j in range(self.n_models):
            decoder_states[j].prune_complete_beam(active_idx, remaining_sents)
        remaining_sents = len(active)
    # (4) package everything up
    all_hyp, all_scores, all_attn = [], [], []
    n_best = self.opt.n_best
    all_lengths = []
    for b in range(batch_size):
        scores, ks = beam[b].sortBest()
        all_scores += [scores[:n_best]]
        hyps, attn, length = zip(*[beam[b].getHyp(k) for k in ks[:n_best]])
        all_hyp += [hyps]
        all_lengths += [length]
        # keep only attention columns over non-pad source positions;
        # audio sources carry features in dim 2, so check the first channel
        if self.opt.encoder_type == 'audio':
            valid_attn = decoder_states[0].original_src.narrow(2, 0, 1).squeeze(2)[:, b].ne(onmt.constants.PAD) \
                .nonzero().squeeze(1)
        else:
            valid_attn = decoder_states[0].original_src[:, b].ne(onmt.constants.PAD) \
                .nonzero().squeeze(1)
        attn = [a.index_select(1, valid_attn) for a in attn]
        all_attn += [attn]
        if self.beam_accum:
            # record the search history for later visualization
            self.beam_accum["beam_parent_ids"].append(
                [t.tolist()
                 for t in beam[b].prevKs])
            self.beam_accum["scores"].append([
                ["%4f" % s for s in t.tolist()]
                for t in beam[b].all_scores][1:])
            self.beam_accum["predicted_ids"].append(
                [[self.tgt_dict.getLabel(id)
                  for id in t.tolist()]
                 for t in beam[b].nextYs][1:])
    torch.set_grad_enabled(True)
    return all_hyp, all_scores, all_attn, all_lengths, gold_scores, gold_words, allgold_scores
def translate(self, src_data, tgt_data):
    """Translate raw tokenized sentences end-to-end.

    Returns n-best token lists per sentence plus scores/lengths and
    gold statistics when tgt_data is provided.
    """
    # (1) words -> indices
    dataset = self.build_data(src_data, tgt_data)
    batch = dataset.next()[0]
    if self.cuda:
        batch.cuda(fp16=self.fp16)
    batch_size = batch.size
    # (2) beam search
    pred, pred_score, attn, pred_length, gold_score, gold_words, allgold_words = self.translate_batch(batch)
    # (3) indices -> words
    pred_batch = [
        [self.build_target_tokens(pred[b][n], src_data[b], attn[b][n])
         for n in range(self.opt.n_best)]
        for b in range(batch_size)
    ]
    return pred_batch, pred_score, pred_length, gold_score, gold_words, allgold_words
def translate_asr(self, src_data, tgt_data):
    """Translate a batch of pre-extracted audio features end-to-end.

    Same contract as translate(), but the source is audio features
    rather than token sequences.
    """
    # (1) wrap features (and optional references) in a dataset
    dataset = self.build_asr_data(src_data, tgt_data)
    batch = dataset.next()[0]
    if self.cuda:
        batch.cuda(fp16=self.fp16)
    batch_size = batch.size
    # (2) beam search
    pred, pred_score, attn, pred_length, gold_score, gold_words, allgold_words = self.translate_batch(batch)
    # (3) indices -> words
    pred_batch = [
        [self.build_target_tokens(pred[b][n], src_data[b], attn[b][n])
         for n in range(self.opt.n_best)]
        for b in range(batch_size)
    ]
    return pred_batch, pred_score, pred_length, gold_score, gold_words, allgold_words
| 15,611 | 35.138889 | 124 | py |
NMTGMinor | NMTGMinor-master/onmt/inference/nam_translate.py | import onmt
import onmt.modules
import torch.nn as nn
import torch
import math
from torch.autograd import Variable
from onmt.model_factory import build_model
import torch.nn.functional as F
from onmt.inference.search import BeamSearch, DiverseBeamSearch
from onmt.inference.translator import Translator
model_list = ['transformer', 'stochastic_transformer']
class FastTranslator(Translator):
"""
A fast implementation of the Beam Search based translator
Based on Fairseq implementation
"""
def __init__(self, opt):
    """Set up beam-search hyper-parameters on top of the base Translator.

    opt carries the command-line options: beam size, score normalization,
    length penalty (alpha), incremental-decoding buffering, and optional
    n-gram-repeat / dynamic-length constraints.
    """
    super().__init__(opt)
    self.search = BeamSearch(self.tgt_dict)
    self.eos = onmt.constants.EOS
    self.pad = onmt.constants.PAD
    self.bos = self.bos_id
    self.vocab_size = self.tgt_dict.size()
    self.min_len = 1  # never emit EOS before one real token
    self.normalize_scores = opt.normalize
    self.len_penalty = opt.alpha
    self.buffering = not opt.no_buffering
    # Older option namespaces may lack these attributes; use safe defaults.
    if hasattr(opt, 'no_repeat_ngram_size'):
        self.no_repeat_ngram_size = opt.no_repeat_ngram_size
    else:
        self.no_repeat_ngram_size = 0
    if hasattr(opt, 'dynamic_max_len'):
        self.dynamic_max_len = opt.dynamic_max_len
    else:
        self.dynamic_max_len = False
    if hasattr(opt, 'dynamic_max_len_scale'):
        self.dynamic_max_len_scale = opt.dynamic_max_len_scale
    else:
        self.dynamic_max_len_scale = 1.2
    if opt.verbose:
        print('* Current bos id: %d' % self.bos_id, onmt.constants.BOS)
        print('* Using fast beam search implementation')
def translateBatch(self, batch, prefix=None):
    """Beam-search a batch with autograd disabled; optional forced target prefix."""
    with torch.no_grad():
        return self._translateBatch(batch, prefix_tokens=prefix)
def _translateBatch(self, batch, prefix_tokens=None):
    """Fairseq-style batched beam search over the model ensemble.

    :param batch: input batch (source tokens / features, optional target)
    :param prefix_tokens: optional forced target prefix per sentence
    :return: (finalized, gold_scores, gold_words, allgold_scores) where
        finalized[sent] is a score-sorted list of hypothesis dicts with
        keys 'tokens', 'score', 'attention', 'alignment',
        'positional_scores'.
    """
    # Batch size is in different location depending on data.
    beam_size = self.opt.beam_size
    bsz = batch_size = batch.size
    max_len = self.opt.max_sent_length
    gold_scores = batch.get('source').data.new(batch_size).float().zero_()
    gold_words = 0
    allgold_scores = []
    if batch.has_target:
        # Use the first model to score the gold target
        model_ = self.models[0]
        gold_words, gold_scores, allgold_scores = model_.decode(batch)
    # (3) Start decoding
    # initialize buffers: rows are (sentence, beam) pairs flattened to
    # bsz * beam_size; column t holds step t of the hypothesis.
    src = batch.get('source')
    scores = src.new(bsz * beam_size, max_len + 1).float().fill_(0)
    scores_buf = scores.clone()
    tokens = src.new(bsz * beam_size, max_len + 2).long().fill_(self.pad)
    tokens_buf = tokens.clone()
    tokens[:, 0].fill_(self.bos)  # first token is bos
    attn, attn_buf = None, None  # allocated lazily on the first step
    nonpad_idxs = None
    src_tokens = src.transpose(0, 1)  # batch x time
    src_lengths = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
    blacklist = src_tokens.new_zeros(bsz, beam_size).eq(-1)  # forward and backward-compatible False mask
    # list of completed sentences
    finalized = [[] for i in range(bsz)]
    finished = [False for i in range(bsz)]
    num_remaining_sent = bsz
    # number of candidate hypos per step
    cand_size = 2 * beam_size  # 2 x beam size in case half are EOS
    # offset arrays for converting between different indexing schemes
    bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
    cand_offsets = torch.arange(0, cand_size).type_as(tokens)
    # helper function for allocating buffers on the fly
    buffers = {}

    def buffer(name, type_of=tokens):  # noqa
        # Lazily create (and reuse) a named tensor buffer of the given type.
        if name not in buffers:
            buffers[name] = type_of.new()
        return buffers[name]

    def is_finished(sent, step, unfinalized_scores=None):
        """
        Check whether we've finished generation for a given sentence:
        done once beam_size hypotheses have been finalized.
        NOTE(review): callers pass unfin_idx as the third argument; that is
        harmless here because the parameter is never used.
        """
        assert len(finalized[sent]) <= beam_size
        if len(finalized[sent]) == beam_size:
            return True
        return False

    def finalize_hypos(step, bbsz_idx, eos_scores):
        """
        Finalize the given hypotheses at this step, while keeping the total
        number of finalized hypotheses per sentence <= beam_size.

        Note: the input must be in the desired finalization order, so that
        hypotheses that appear earlier in the input are preferred to those
        that appear later.

        Args:
            step: current time step
            bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
                indicating which hypotheses to finalize
            eos_scores: A vector of the same size as bbsz_idx containing
                scores for each hypothesis
        """
        assert bbsz_idx.numel() == eos_scores.numel()
        # clone relevant token and attention tensors
        tokens_clone = tokens.index_select(0, bbsz_idx)
        tokens_clone = tokens_clone[:, 1:step + 2]  # skip the first index, which is BOS
        assert not tokens_clone.eq(self.eos).any()
        tokens_clone[:, step] = self.eos
        attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step + 2] if attn is not None else None
        # compute scores per token position
        pos_scores = scores.index_select(0, bbsz_idx)[:, :step + 1]
        pos_scores[:, step] = eos_scores
        # convert from cumulative to per-position scores
        pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
        # normalize sentence-level scores by length^alpha
        if self.normalize_scores:
            eos_scores /= (step + 1) ** self.len_penalty
        # cum_unfin[i] = number of already-finished sentences before the
        # i-th still-unfinished one (maps compacted index -> original index)
        cum_unfin = []
        prev = 0
        for f in finished:
            if f:
                prev += 1
            else:
                cum_unfin.append(prev)
        sents_seen = set()
        for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), eos_scores.tolist())):
            unfin_idx = idx // beam_size
            sent = unfin_idx + cum_unfin[unfin_idx]
            sents_seen.add((sent, unfin_idx))

            def get_hypo():
                # Package one finished hypothesis as a dict.
                if attn_clone is not None:
                    # remove padding tokens from attn scores
                    hypo_attn = attn_clone[i]
                else:
                    hypo_attn = None
                return {
                    'tokens': tokens_clone[i],
                    'score': score,
                    'attention': hypo_attn,  # src_len x tgt_len
                    'alignment': None,
                    'positional_scores': pos_scores[i],
                }

            if len(finalized[sent]) < beam_size:
                finalized[sent].append(get_hypo())
        newly_finished = []
        for sent, unfin_idx in sents_seen:
            # check termination conditions for this sentence
            if not finished[sent] and is_finished(sent, step, unfin_idx):
                finished[sent] = True
                newly_finished.append(unfin_idx)
        return newly_finished

    reorder_state = None
    batch_idxs = None
    # initialize the decoder state, including:
    # - expanding the context over the batch dimension len_src x (B*beam) x H
    # - expanding the mask over the batch dimension (B*beam) x len_src
    decoder_states = dict()
    for i in range(self.n_models):
        decoder_states[i] = self.models[i].create_decoder_state(batch, beam_size, type=2, buffering=self.buffering)
        len_context = decoder_states[i].context.size(0)
    if self.dynamic_max_len:
        # scale the maximum target length with the source length
        src_len = src.size(0)
        max_len = math.ceil(int(src_len) * self.dynamic_max_len_scale)
    # Start decoding
    for step in range(max_len + 1):  # one extra step for EOS marker
        # reorder decoder internal states based on the prev choice of beams
        if reorder_state is not None:
            if batch_idxs is not None:
                # update beam indices to take into account removed sentences
                corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs)
                reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)
            for i, model in enumerate(self.models):
                decoder_states[i]._reorder_incremental_state(reorder_state)
        decode_input = tokens[:, :step + 1]
        lprobs, avg_attn_scores = self._decode(decode_input, decoder_states)
        # handle min and max length constraints
        if step >= max_len:
            # force EOS: mask out every non-EOS token
            lprobs[:, :self.eos] = -math.inf
            lprobs[:, self.eos + 1:] = -math.inf
        elif step < self.min_len:
            # forbid EOS before min_len tokens
            lprobs[:, self.eos] = -math.inf
        # handle prefix tokens (possibly with different lengths)
        if prefix_tokens is not None:
            prefix_tokens = torch.tensor(prefix_tokens).type_as(tokens)
            if step < prefix_tokens.size(1) and step < max_len:
                prefix_tokens = torch.tensor(prefix_tokens).type_as(tokens)
                prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
                prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
                prefix_mask = prefix_toks.ne(self.pad)
                # keep only the forced token's probability on prefixed rows
                lprobs[prefix_mask] = torch.tensor(-math.inf).to(lprobs)
                lprobs[prefix_mask] = lprobs[prefix_mask].scatter(
                    -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask]
                )
                # if prefix includes eos, then we should make sure tokens and
                # scores are the same across all beams
                eos_mask = prefix_toks.eq(self.eos)
                if eos_mask.any():
                    # validate that the first beam matches the prefix
                    first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[:, 0, 1:step + 1]
                    eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
                    target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
                    assert (first_beam == target_prefix).all()

                    def replicate_first_beam(tensor, mask):
                        # Broadcast beam 0's row over all beams of masked sentences.
                        tensor = tensor.view(-1, beam_size, tensor.size(-1))
                        tensor[mask] = tensor[mask][:, :1, :]
                        return tensor.view(-1, tensor.size(-1))

                    # copy tokens, scores and lprobs from the first beam to all beams
                    tokens = replicate_first_beam(tokens, eos_mask_batch_dim)
                    scores = replicate_first_beam(scores, eos_mask_batch_dim)
                    lprobs = replicate_first_beam(lprobs, eos_mask_batch_dim)
        if self.no_repeat_ngram_size > 0:
            # for each beam and batch sentence, generate a list of previous ngrams
            gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)]
            for bbsz_idx in range(bsz * beam_size):
                gen_tokens = tokens[bbsz_idx].tolist()
                for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]):
                    gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \
                        gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]]
        # Record attention scores
        if avg_attn_scores is not None:
            if attn is None:
                attn = scores.new(bsz * beam_size, len_context, max_len + 2)
                attn_buf = attn.clone()
            attn[:, :, step + 1].copy_(avg_attn_scores)
        scores = scores.type_as(lprobs)
        scores_buf = scores_buf.type_as(lprobs)
        eos_bbsz_idx = buffer('eos_bbsz_idx')
        eos_scores = buffer('eos_scores', type_of=scores)
        if self.no_repeat_ngram_size > 0:
            def calculate_banned_tokens(bbsz_idx):
                # before decoding the next token, prevent decoding of ngrams
                # that have already appeared
                ngram_index = tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist())
                return gen_ngrams[bbsz_idx].get(ngram_index, [])

            if step + 2 - self.no_repeat_ngram_size >= 0:
                # no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
                banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)]
            else:
                banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)]
            for bbsz_idx in range(bsz * beam_size):
                lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf
        # pick up to cand_size candidate (score, token, beam) triples per sentence
        cand_scores, cand_indices, cand_beams = self.search.step(
            step,
            lprobs.view(bsz, -1, self.vocab_size),
            scores.view(bsz, beam_size, -1)[:, :, :step],
        )
        # cand_bbsz_idx contains beam indices for the top candidate
        # hypotheses, with a range of values: [0, bsz*beam_size),
        # and dimensions: [bsz, cand_size]
        cand_bbsz_idx = cand_beams.add(bbsz_offsets)
        # finalize hypotheses that end in eos (except for blacklisted ones)
        eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf)
        eos_mask[:, :beam_size][blacklist] = 0
        # only consider eos when it's among the top beam_size indices
        torch.masked_select(
            cand_bbsz_idx[:, :beam_size],
            mask=eos_mask[:, :beam_size],
            out=eos_bbsz_idx,
        )
        finalized_sents = set()
        if eos_bbsz_idx.numel() > 0:
            torch.masked_select(
                cand_scores[:, :beam_size],
                mask=eos_mask[:, :beam_size],
                out=eos_scores,
            )
            finalized_sents = finalize_hypos(step, eos_bbsz_idx, eos_scores)
            num_remaining_sent -= len(finalized_sents)
        assert num_remaining_sent >= 0
        if num_remaining_sent == 0:
            break
        assert step < max_len
        if len(finalized_sents) > 0:
            # some sentences just finished: compact all per-sentence tensors
            new_bsz = bsz - len(finalized_sents)
            # construct batch_idxs which holds indices of batches to keep for the next pass
            batch_mask = cand_indices.new_ones(bsz)
            batch_mask[cand_indices.new(finalized_sents)] = 0
            batch_idxs = batch_mask.nonzero(as_tuple=False).squeeze(-1)
            eos_mask = eos_mask[batch_idxs]
            cand_beams = cand_beams[batch_idxs]
            bbsz_offsets.resize_(new_bsz, 1)
            cand_bbsz_idx = cand_beams.add(bbsz_offsets)
            cand_scores = cand_scores[batch_idxs]
            cand_indices = cand_indices[batch_idxs]
            if prefix_tokens is not None:
                prefix_tokens = prefix_tokens[batch_idxs]
            src_lengths = src_lengths[batch_idxs]
            blacklist = blacklist[batch_idxs]
            scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
            scores_buf.resize_as_(scores)
            tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
            tokens_buf.resize_as_(tokens)
            if attn is not None:
                attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1)
                attn_buf.resize_as_(attn)
            bsz = new_bsz
        else:
            batch_idxs = None
        # Set active_mask so that values > cand_size indicate eos or
        # blacklisted hypos and values < cand_size indicate candidate
        # active hypos. After this, the min values per row are the top
        # candidate active hypos.
        active_mask = buffer('active_mask')
        eos_mask[:, :beam_size] |= blacklist
        torch.add(
            eos_mask.type_as(cand_offsets) * cand_size,
            cand_offsets[:eos_mask.size(1)],
            out=active_mask,
        )
        # get the top beam_size active hypotheses, which are just the hypos
        # with the smallest values in active_mask
        active_hypos, new_blacklist = buffer('active_hypos'), buffer('new_blacklist')
        torch.topk(
            active_mask, k=beam_size, dim=1, largest=False,
            out=(new_blacklist, active_hypos)
        )
        # update blacklist to ignore any finalized hypos
        blacklist = new_blacklist.ge(cand_size)[:, :beam_size]
        assert (~blacklist).any(dim=1).all()
        active_bbsz_idx = buffer('active_bbsz_idx')
        torch.gather(
            cand_bbsz_idx, dim=1, index=active_hypos,
            out=active_bbsz_idx,
        )
        active_scores = torch.gather(
            cand_scores, dim=1, index=active_hypos,
            out=scores[:, step].view(bsz, beam_size),
        )
        active_bbsz_idx = active_bbsz_idx.view(-1)
        active_scores = active_scores.view(-1)
        # copy tokens and scores for active hypotheses into the double buffers
        torch.index_select(
            tokens[:, :step + 1], dim=0, index=active_bbsz_idx,
            out=tokens_buf[:, :step + 1],
        )
        torch.gather(
            cand_indices, dim=1, index=active_hypos,
            out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1],
        )
        if step > 0:
            torch.index_select(
                scores[:, :step], dim=0, index=active_bbsz_idx,
                out=scores_buf[:, :step],
            )
        torch.gather(
            cand_scores, dim=1, index=active_hypos,
            out=scores_buf.view(bsz, beam_size, -1)[:, :, step],
        )
        # copy attention for active hypotheses
        if attn is not None:
            torch.index_select(
                attn[:, :, :step + 2], dim=0, index=active_bbsz_idx,
                out=attn_buf[:, :, :step + 2],
            )
        # swap buffers
        tokens, tokens_buf = tokens_buf, tokens
        scores, scores_buf = scores_buf, scores
        if attn is not None:
            attn, attn_buf = attn_buf, attn
        # reorder incremental state in decoder
        reorder_state = active_bbsz_idx
    # sort by score descending
    for sent in range(len(finalized)):
        finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)
    return finalized, gold_scores, gold_words, allgold_scores
def _decode(self, tokens, decoder_states):
    """Run one decoding step for every ensembled model and merge them.

    :param tokens: (batch*beam) x (step+1) token prefix matrix
    :param decoder_states: dict {model_index: decoder state}, advanced in place
    :return: (combined log-prob tensor, combined attention tensor)
    """
    outs = dict()
    attns = dict()
    for i in range(self.n_models):
        decoder_output = self.models[i].step(tokens, decoder_states[i])
        outs[i] = decoder_output['log_prob']
        attns[i] = decoder_output['coverage']
    out = self._combine_outputs(outs)
    attn = self._combine_attention(attns)
    # _combine_outputs truncates to the smallest shared vocabulary across
    # models; keep self.vocab_size in sync with the actual output width.
    if self.vocab_size > out.size(-1):
        # Fixed: was out.size(-13), an out-of-range dimension that would
        # raise IndexError whenever the vocabularies differed.
        self.vocab_size = out.size(-1)
    return out, attn
def translate(self, src_data, tgt_data, type='mt'):
    """Translate one batch of sentences (or audio features) end-to-end.

    :param src_data: tokenized source sentences, or features when type='asr'
    :param tgt_data: optional reference sentences for gold scoring
    :param type: 'mt' or 'asr' (forwarded to build_data)
    :return: (pred_batch, pred_score, pred_length, gold_score, gold_words,
        allgold_words); pred_length is always an empty list here.
    """
    # (1) words -> indices
    dataset = self.build_data(src_data, tgt_data, type=type)
    batch = dataset.get_batch(0)
    if self.cuda:
        batch.cuda(fp16=self.fp16)
    batch_size = batch.size
    # (2) beam search
    # Fixed: removed leftover debug `print(finalized)` that dumped the
    # whole beam output to stdout on every batch.
    finalized, gold_score, gold_words, allgold_words = self.translateBatch(batch)
    pred_length = []
    # (3) indices -> words
    pred_batch = []
    for b in range(batch_size):
        pred_batch.append(
            [self.build_target_tokens(finalized[b][n]['tokens'], src_data[b], None)
             for n in range(self.opt.n_best)]
        )
    pred_score = []
    for b in range(batch_size):
        pred_score.append(
            [torch.FloatTensor([finalized[b][n]['score']])
             for n in range(self.opt.n_best)]
        )
    return pred_batch, pred_score, pred_length, gold_score, gold_words, allgold_words
def translate_incl(self, src_data, tgt_data, prefix=None, type='mt'):
    """Translate with an optional forced target prefix.

    Same as translate(), but also returns the raw best hypothesis dict of
    the first sentence as the first element of the result tuple.
    """
    # (1) words -> indices
    dataset = self.build_data(src_data, tgt_data, type=type)
    batch = dataset.get_batch(0)
    if self.cuda:
        batch.cuda(fp16=self.fp16)
    batch_size = batch.size
    # (2) beam search with the prefix constraint
    finalized, gold_score, gold_words, allgold_words = self.translateBatch(batch, prefix=prefix)
    pred_length = []
    # (3) indices -> words
    pred_batch = [
        [self.build_target_tokens(finalized[b][n]['tokens'], src_data[b], None)
         for n in range(self.opt.n_best)]
        for b in range(batch_size)
    ]
    pred_score = [
        [torch.FloatTensor([finalized[b][n]['score']])
         for n in range(self.opt.n_best)]
        for b in range(batch_size)
    ]
    return finalized[0][0], pred_batch, pred_score, pred_length, gold_score, gold_words, allgold_words
| 23,188 | 40.483005 | 119 | py |
NMTGMinor | NMTGMinor-master/onmt/inference/translator.py | import onmt
import onmt.modules
import torch
from onmt.model_factory import build_model, build_language_model, optimize_model
from ae.Autoencoder import Autoencoder
import torch.nn.functional as F
import sys
from onmt.constants import add_tokenidx
from options import backward_compatible
model_list = ['transformer', 'stochastic_transformer', 'fusion_network']
class Translator(object):
def __init__(self, opt):
    """Load one or more NMT checkpoints (and optional LM / autoencoder)
    and prepare them for inference.

    opt.model is a '|'-separated list of checkpoint paths; dictionaries,
    language maps and the BOS id are taken from the first checkpoint.
    """
    self.opt = opt
    self.tt = torch.cuda if opt.cuda else torch
    self.beam_accum = None
    self.beta = opt.beta
    self.alpha = opt.alpha
    self.start_with_bos = opt.start_with_bos
    self.fp16 = opt.fp16
    self.attributes = opt.attributes  # attributes split by |. for example: de|domain1
    self.bos_token = opt.bos_token
    self.sampling = opt.sampling
    self.src_lang = opt.src_lang
    self.tgt_lang = opt.tgt_lang
    if self.attributes:
        self.attributes = self.attributes.split("|")
    self.models = list()
    self.model_types = list()
    # models are string with | as delimiter
    models = opt.model.split("|")
    print(models)
    self.n_models = len(models)
    self._type = 'text'
    for i, model_path in enumerate(models):
        # always load onto CPU first; moved to GPU below if requested
        checkpoint = torch.load(model_path,
                                map_location=lambda storage, loc: storage)
        model_opt = checkpoint['opt']
        model_opt = backward_compatible(model_opt)
        if hasattr(model_opt, "enc_state_dict"):
            # drop pretrained sub-state dicts kept inside the options object
            model_opt.enc_state_dict = None
            model_opt.dec_state_dict = None
        self.main_model_opt = model_opt
        dicts = checkpoint['dicts']
        # update special tokens
        onmt.constants = add_tokenidx(model_opt, onmt.constants, dicts)
        if i == 0:
            # dictionaries and ids come from the first checkpoint only
            if "src" in checkpoint['dicts']:
                self.src_dict = checkpoint['dicts']['src']
            else:
                # no source dictionary -> speech input
                self._type = "audio"
            self.tgt_dict = checkpoint['dicts']['tgt']
            if "langs" in checkpoint["dicts"]:
                self.lang_dict = checkpoint['dicts']['langs']
            else:
                self.lang_dict = {'src': 0, 'tgt': 1}
            if "atbs" in checkpoint["dicts"]:
                self.atb_dict = checkpoint['dicts']['atbs']
            else:
                self.atb_dict = {'nothingness': 0}
            self.bos_id = self.tgt_dict.labelToIdx[self.bos_token]
            print("[INFO] Bos Token: %s Bos_ID: %d" % (self.bos_token, self.bos_id))
        model = build_model(model_opt, checkpoint['dicts'], remove_pretrain=True)
        if opt.verbose:
            print('Loading model from %s' % model_path)
        # load weights; if the plain load fails, optimize (fuse) the model
        # first and retry, finally retrying strictly
        try:
            model.load_state_dict(checkpoint['model'])
            optimize_model(model)
        except RuntimeError:
            optimize_model(model)
            try:
                model.load_state_dict(checkpoint['model'])
            except RuntimeError:
                model.load_state_dict(checkpoint['model'], strict=True)
        if model_opt.model in model_list:
            # resize positional buffers to cover the decoding length
            model.renew_buffer(self.opt.max_sent_length)
        if opt.fp16:
            model = model.half()
        if opt.cuda:
            model = model.cuda()
        else:
            model = model.cpu()
        if opt.dynamic_quantile == 1:
            # dynamic int8 quantization for CPU inference
            engines = torch.backends.quantized.supported_engines
            if 'fbgemm' in engines:
                torch.backends.quantized.engine = 'fbgemm'
            else:
                print("[INFO] fbgemm is not found in the available engines. Possibly the CPU does not support AVX2."
                      " It is recommended to disable Quantization (set to 0).")
                torch.backends.quantized.engine = 'qnnpack'
            # convert the custom functions to their autograd equivalent first
            model.convert_autograd()
            model = torch.quantization.quantize_dynamic(
                model, {torch.nn.LSTM, torch.nn.Linear}, dtype=torch.qint8
            )
        model.eval()
        self.models.append(model)
        self.model_types.append(model_opt.model)
    # optional language model for shallow fusion
    if opt.lm is not None:
        if opt.verbose:
            print('Loading language model from %s' % opt.lm)
        lm_chkpoint = torch.load(opt.lm, map_location=lambda storage, loc: storage)
        lm_opt = lm_chkpoint['opt']
        lm_model = build_language_model(lm_opt, checkpoint['dicts'])
        if opt.fp16:
            lm_model = lm_model.half()
        if opt.cuda:
            lm_model = lm_model.cuda()
        else:
            lm_model = lm_model.cpu()
        self.lm_model = lm_model
    self.cuda = opt.cuda
    self.ensemble_op = opt.ensemble_op
    # optional autoencoder wrapped around the first model
    if opt.autoencoder is not None:
        if opt.verbose:
            print('Loading autoencoder from %s' % opt.autoencoder)
        checkpoint = torch.load(opt.autoencoder,
                                map_location=lambda storage, loc: storage)
        model_opt = checkpoint['opt']
        # Build model from the saved option
        self.autoencoder = Autoencoder(self.models[0], model_opt)
        self.autoencoder.load_state_dict(checkpoint['autoencoder'])
        if opt.cuda:
            self.autoencoder = self.autoencoder.cuda()
            self.models[0] = self.models[0].cuda()
        else:
            self.autoencoder = self.autoencoder.cpu()
            self.models[0] = self.models[0].cpu()
        self.models[0].autoencoder = self.autoencoder
    if opt.verbose:
        print('Done')
def init_beam_accum(self):
    """Reset the beam-visualization accumulator to empty history lists."""
    self.beam_accum = {
        "predicted_ids": [],
        "beam_parent_ids": [],
        "scores": [],
        "log_probs": [],
    }
# Combine distributions from different models
def _combine_outputs(self, outputs, weight=None):
if weight is None:
weight = [1.0/len(outputs) for _ in range(len(outputs))]
# in case outputs have difference vocabulary sizes: take the shortest common one
sizes = [output_.size(-1) for output_ in outputs.values()]
min_size = min(sizes)
for key in outputs:
outputs[key] = outputs[key][:, :min_size]
# outputs = resized_outputs
if len(outputs) == 1:
return outputs[0]
if self.ensemble_op == "logSum":
output = (outputs[0]) * weight[0]
# sum the log prob
for i in range(1, len(outputs)):
output += (outputs[i] * weight[i])
# output.div_(len(outputs))
output = F.log_softmax(output, dim=-1)
elif self.ensemble_op == "mean": # default one
output = torch.exp(outputs[0]) * weight[0]
# sum the log prob
for i in range(1, len(outputs)):
output += torch.exp(outputs[i]) * weight[i]
# output.div_(len(outputs))
output = torch.log(output)
elif self.ensemble_op == "max":
output = outputs[0]
for i in range(1, len(outputs)):
output = torch.max(output, outputs[i])
elif self.ensemble_op == "min":
output = outputs[0]
for i in range(1, len(outputs)):
output = torch.min(output, outputs[i])
elif self.ensemble_op == 'gmean':
output = torch.exp(outputs[0])
# geometric mean of the probabilities
for i in range(1, len(outputs)):
output *= torch.exp(outputs[i])
# have to normalize
output.pow_(1.0 / float(len(outputs)))
norm_ = torch.norm(output, p=1, dim=-1)
output.div_(norm_.unsqueeze(-1))
output = torch.log(output)
else:
raise ValueError(
'Emsemble operator needs to be "mean" or "logSum", the current value is %s' % self.ensemble_op)
return output
# Take the average of attention scores
def _combine_attention(self, attns):
attn = attns[0]
for i in range(1, len(attns)):
attn += attns[i]
attn.div(len(attns))
return attn
def build_data(self, src_sents, tgt_sents, type='mt'):
    """Numberize a batch into an onmt.Dataset (must mirror preprocess.py).

    :param src_sents: tokenized sentences ('mt') or raw features ('asr')
    :param tgt_sents: optional reference sentences
    :param type: 'mt' or 'asr'
    :raises NotImplementedError: for any other type value
    """
    if type == 'mt':
        if self.start_with_bos:
            src_data = [self.src_dict.convertToIdx(sent,
                                                   onmt.constants.UNK_WORD,
                                                   onmt.constants.BOS_WORD)
                        for sent in src_sents]
        else:
            src_data = [self.src_dict.convertToIdx(sent,
                                                   onmt.constants.UNK_WORD)
                        for sent in src_sents]
        data_type = 'text'
    elif type == 'asr':
        # audio features arrive pre-extracted; no dictionary lookup needed
        src_data = src_sents
        data_type = 'audio'
    else:
        raise NotImplementedError
    # optionally score the gold target without a BOS prefix
    tgt_bos_word = None if self.opt.no_bos_gold else self.opt.bos_token
    tgt_data = None
    if tgt_sents:
        tgt_data = [self.tgt_dict.convertToIdx(sent,
                                               onmt.constants.UNK_WORD,
                                               tgt_bos_word,
                                               onmt.constants.EOS_WORD)
                    for sent in tgt_sents]
    src_lang_data = [torch.Tensor([self.lang_dict[self.src_lang]])]
    tgt_lang_data = [torch.Tensor([self.lang_dict[self.tgt_lang]])]
    return onmt.Dataset(src_data, tgt_data,
                        src_langs=src_lang_data, tgt_langs=tgt_lang_data,
                        batch_size_words=sys.maxsize,
                        data_type=data_type,
                        batch_size_sents=self.opt.batch_size,
                        src_align_right=self.opt.src_align_right)
def build_asr_data(self, src_data, tgt_sents):
    """Wrap pre-extracted audio features (and optional references) in a Dataset.

    Must mirror the vocabulary lookup done by preprocess.py.
    """
    tgt_data = None
    if tgt_sents:
        tgt_data = [self.tgt_dict.convertToIdx(sent,
                                               onmt.constants.UNK_WORD,
                                               onmt.constants.BOS_WORD,
                                               onmt.constants.EOS_WORD)
                    for sent in tgt_sents]
    return onmt.Dataset(src_data, tgt_data,
                        batch_size_words=sys.maxsize,
                        data_type=self._type,
                        batch_size_sents=self.opt.batch_size)
def build_target_tokens(self, pred, src, attn):
    """Map predicted token ids back to labels, dropping the trailing EOS.

    src and attn are accepted for interface compatibility but unused here.
    """
    labels = self.tgt_dict.convertToLabels(pred, onmt.constants.EOS)
    return labels[:-1]
    def translate_batch(self, batch):
        """Beam-search decode one batch with the model ensemble.

        Args:
            batch: an onmt batch object. A list is accepted but only its
                first element is used (all decoder states are created from
                it) -- NOTE(review): per-model batches are effectively
                collapsed here; confirm this is intended for ASR ensembles.

        Returns:
            Tuple (all_hyp, all_scores, all_attn, all_lengths,
            gold_scores, gold_words, allgold_scores); the gold_* values are
            only non-trivial when ``batch.has_target``.
        """
        if isinstance(batch, list):
            batch = batch[0]
        torch.set_grad_enabled(False)
        # Batch size is in different location depending on data.
        beam_size = self.opt.beam_size
        batch_size = batch.size
        # Gold scoring: force-decode the provided target with the first model.
        gold_scores = batch.get('source').data.new(batch_size).float().zero_()
        gold_words = 0
        allgold_scores = []
        if batch.has_target:
            # Use the first model to decode
            model_ = self.models[0]
            gold_words, gold_scores, allgold_scores = model_.decode(batch)
        #  (3) Start decoding
        # time x batch * beam
        # initialize the beam
        beam = [onmt.Beam(beam_size, self.bos_id, self.opt.cuda, self.opt.sampling) for k in range(batch_size)]
        batch_idx = list(range(batch_size))
        remaining_sents = batch_size
        # One decoder state per ensemble member.
        decoder_states = dict()
        for i in range(self.n_models):
            decoder_states[i] = self.models[i].create_decoder_state(batch, beam_size)
        if self.opt.lm:
            lm_decoder_states = self.lm_model.create_decoder_state(batch, beam_size)
        for i in range(self.opt.max_sent_length):
            # Prepare decoder input.
            # input size: 1 x ( batch * beam )
            input = torch.stack([b.getCurrentState() for b in beam
                                 if not b.done]).t().contiguous().view(1, -1)
            decoder_input = input
            # require batch first for everything
            outs = dict()
            attns = dict()
            for k in range(self.n_models):
                # decoder_hidden, coverage = self.models[k].decoder.step(decoder_input.clone(), decoder_states[k])
                # run decoding on the model
                decoder_output = self.models[k].step(decoder_input.clone(), decoder_states[k])
                # extract the required tensors from the output (a dictionary)
                outs[k] = decoder_output['log_prob']
                attns[k] = decoder_output['coverage']
            # for ensembling models
            out = self._combine_outputs(outs)
            attn = self._combine_attention(attns)
            # for lm fusion
            if self.opt.lm:
                lm_decoder_output = self.lm_model.step(decoder_input.clone(), lm_decoder_states)
                # fusion
                lm_out = lm_decoder_output['log_prob']
                # out = out + 0.3 * lm_out
                # NOTE(review): the LM output *replaces* the translation
                # scores here instead of being interpolated -- confirm.
                out = lm_out
            word_lk = out.view(beam_size, remaining_sents, -1) \
                .transpose(0, 1).contiguous()
            attn = attn.contiguous().view(beam_size, remaining_sents, -1) \
                .transpose(0, 1).contiguous()
            active = []
            for b in range(batch_size):
                if beam[b].done:
                    continue
                idx = batch_idx[b]
                # advance() presumably returns truthy once the beam is
                # complete; unfinished beams stay in `active`.
                if not beam[b].advance(word_lk.data[idx], attn.data[idx]):
                    active += [b]
                for j in range(self.n_models):
                    decoder_states[j].update_beam(beam, b, remaining_sents, idx)
                if self.opt.lm:
                    lm_decoder_states.update_beam(beam, b, remaining_sents, idx)
            if not active:
                break
            # in this section, the sentences that are still active are
            # compacted so that the decoder is not run on completed sentences
            active_idx = self.tt.LongTensor([batch_idx[k] for k in active])
            # Map original batch position -> new (compacted) index.
            # NOTE: `beam` below is only the comprehension variable; it does
            # not clobber the outer `beam` list (comprehensions have their
            # own scope in Python 3).
            batch_idx = {beam: idx for idx, beam in enumerate(active)}
            for j in range(self.n_models):
                decoder_states[j].prune_complete_beam(active_idx, remaining_sents)
            if self.opt.lm:
                lm_decoder_states.prune_complete_beam(active_idx, remaining_sents)
            remaining_sents = len(active)
        #  (4) package everything up
        all_hyp, all_scores, all_attn = [], [], []
        n_best = self.opt.n_best
        all_lengths = []
        for b in range(batch_size):
            scores, ks = beam[b].sortBest()
            all_scores += [scores[:n_best]]
            hyps, attn, length = zip(*[beam[b].getHyp(k) for k in ks[:n_best]])
            all_hyp += [hyps]
            all_lengths += [length]
            # if(src_data.data.dim() == 3):
            if self.opt.encoder_type == 'audio':
                # audio: channel 0 of the features marks padding positions
                valid_attn = decoder_states[0].original_src.narrow(2, 0, 1).squeeze(2)[:, b].ne(onmt.constants.PAD) \
                    .nonzero().squeeze(1)
            else:
                valid_attn = decoder_states[0].original_src[:, b].ne(onmt.constants.PAD) \
                    .nonzero().squeeze(1)
            # print(valid_attn)
            # for a in attn:
            #     print(a.shape)
            attn = [a for a in attn]
            all_attn += [attn]
            # Optionally accumulate per-step beam internals for debugging.
            if self.beam_accum:
                self.beam_accum["beam_parent_ids"].append(
                    [t.tolist()
                     for t in beam[b].prevKs])
                self.beam_accum["scores"].append([
                    ["%4f" % s for s in t.tolist()]
                    for t in beam[b].all_scores][1:])
                self.beam_accum["predicted_ids"].append(
                    [[self.tgt_dict.getLabel(id)
                      for id in t.tolist()]
                     for t in beam[b].nextYs][1:])
        torch.set_grad_enabled(True)
        return all_hyp, all_scores, all_attn, all_lengths, gold_scores, gold_words, allgold_scores
def translate(self, src_data, tgt_data, type="mt"):
if isinstance(src_data[0], list) and type == 'asr':
batches = list()
for src_data_ in src_data:
dataset = self.build_data(src_data_, tgt_data, type=type)
batch = dataset.get_batch(0)
batches.append(batch)
else:
dataset = self.build_data(src_data, tgt_data, type=type)
batch = dataset.get_batch(0) # this dataset has only one mini-batch
batches = [batch] * self.n_models
src_data = [src_data] * self.n_models
if self.cuda:
for i, _ in enumerate(batches):
batches[i].cuda(fp16=self.fp16)
batch_size = batches[0].size
# (2) translate
pred, pred_score, attn, pred_length, gold_score, gold_words, allgold_words = self.translate_batch(batches)
# (3) convert indexes to words
src_data = src_data[0]
pred_batch = []
for b in range(batch_size):
pred_batch.append(
[self.build_target_tokens(pred[b][n], src_data[b], attn[b][n])
for n in range(self.opt.n_best)]
)
pred_ids = pred
return pred_batch, pred_score, pred_length, pred, gold_score, gold_words, allgold_words
def translate_asr(self, src_data, tgt_data):
# (1) convert words to indexes
dataset = self.build_asr_data(src_data, tgt_data)
# src, tgt = batch
batch = dataset.get_batch(0)
if self.cuda:
batch.cuda(fp16=self.fp16)
batch_size = batch.size
# (2) translate
pred, pred_score, attn, pred_length, gold_score, gold_words, allgold_words = self.translate_batch(batch)
# (3) convert indexes to words
pred_batch = []
for b in range(batch_size):
pred_batch.append(
[self.build_target_tokens(pred[b][n], src_data[b], attn[b][n])
for n in range(self.opt.n_best)]
)
return pred_batch, pred_score, pred_length, gold_score, gold_words, allgold_words
| 19,446 | 35.485929 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/stochastic_transformer_layers.py | import torch
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
class StochasticEncoderLayer(EncoderLayer):
    """Encoder layer with stochastic depth (layer dropout).

    During training the whole layer is skipped with probability
    ``death_rate``; when it does execute, the residual branches are rescaled
    by 1 / (1 - death_rate) so expectations match test time, where the layer
    always runs.

    Input Shapes:
        input: batch_size x len_query x d_model
        attn_mask: batch_size x len_query x len_key or broadcastable
    Output Shapes:
        out: batch_size x len_query x d_model
    """

    def __init__(self, h, d_model, p, d_ff, attn_p=0.1, version=1.0, death_rate=0.0):
        super().__init__(h, d_model, p, d_ff, attn_p, version)
        self.death_rate = death_rate

    def forward(self, input, attn_mask):
        # At test time the layer always runs; during training flip a coin.
        alive = True
        if self.training:
            alive = torch.rand(1)[0].item() >= self.death_rate

        if alive:
            # self-attention sub-layer: layernorm > attn > dropout > residual
            normed = self.preprocess_attn(input)
            attn_out, _ = self.multihead(normed, normed, normed, attn_mask)
            if self.training:
                attn_out = attn_out / (1 - self.death_rate)
            input = self.postprocess_attn(attn_out, input)

            # feed-forward sub-layer: layernorm > ffn > dropout > residual
            ffn_out = self.feedforward(self.preprocess_ffn(input))
            if self.training:
                ffn_out = ffn_out / (1 - self.death_rate)
            input = self.postprocess_ffn(ffn_out, input)

        return input
class StochasticDecoderLayer(DecoderLayer):
    """Decoder layer with stochastic depth (layer dropout).

    During training the whole layer (all three sub-layers) is skipped with
    probability ``death_rate``; when it executes, each residual branch is
    rescaled by 1 / (1 - death_rate) so expectations match test time.

    Input Shapes:
        input: batch_size x len_query x d_model
        context: batch_size x len_src x d_model
        mask_tgt: batch_size x len_query x len_key or broadcastable
        mask_src: batch_size x len_query x len_src or broadcastable
    Output Shapes:
        out: batch_size x len_query x d_model
        coverage: batch_size x len_query x len_key (None when skipped)
    """

    def __init__(self, h, d_model, p, d_ff, attn_p=0.1, version=1.0, death_rate=0.0):
        super().__init__(h, d_model, p, d_ff, attn_p, version)
        self.death_rate = death_rate

    def forward(self, input, context, mask_tgt, mask_src):
        # `input` stays un-normalized between layers; each sub-layer
        # normalizes its own input, so skipping the layer simply forwards
        # `input` unchanged.
        coverage = None

        alive = True
        if self.training:
            alive = torch.rand(1)[0].item() >= self.death_rate

        if alive:
            # self-attention: layernorm > attn > dropout > residual
            query = self.preprocess_attn(input)
            self_out, _ = self.multihead_tgt(query, query, query, mask_tgt)
            if self.training:
                self_out = self_out / (1 - self.death_rate)
            input = self.postprocess_attn(self_out, input)

            # encoder-decoder attention: layernorm > attn > dropout > residual
            query = self.preprocess_src_attn(input)
            src_out, coverage = self.multihead_src(query, context, context, mask_src)
            if self.training:
                src_out = src_out / (1 - self.death_rate)
            input = self.postprocess_src_attn(src_out, input)

            # feed forward: layernorm > ffn > dropout > residual
            ffn_out = self.feedforward(self.preprocess_ffn(input))
            if self.training:
                ffn_out = ffn_out / (1 - self.death_rate)
            input = self.postprocess_ffn(ffn_out, input)

        return input, coverage
| 4,693 | 30.716216 | 117 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/stochastic_transformers.py | import numpy as np
import torch, math
import torch.nn as nn
import onmt
from onmt.models.transformer_layers import PositionalEncoding
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.legacy.stochastic_transformer_layers import StochasticEncoderLayer, StochasticDecoderLayer
from onmt.models.transformers import TransformerEncoder, TransformerDecoder
from onmt.modules.base_seq2seq import NMTModel, Reconstructor
from onmt.modules.dropout import embedded_dropout
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, PrePostProcessing
from onmt.modules.linear import FeedForward, FeedForwardSwish
def custom_layer(module):
    """Wrap *module* in a plain forwarding function (useful for
    torch.utils.checkpoint, which expects a callable)."""
    return lambda *args: module(*args)
def expected_length(length, death_rate):
    """Expected number of executed layers under linearly scaled layer drop.

    Layer l (1-based, out of ``length``) survives with probability
    1 - l / length * death_rate; the expected depth is the sum of these
    survival probabilities.
    """
    return sum(1.0 - (layer + 1) / length * death_rate for layer in range(length))
class StochasticTransformerEncoder(TransformerEncoder):
    """Transformer encoder whose layers are dropped stochastically.

    Args:
        opt: list of options ( see train.py )
        dicts : dictionary (for source language)
    """

    def __init__(self, opt, dicts, positional_encoder, encoder_type='text'):
        # must be set before the parent constructor runs, because the
        # parent calls build_modules() below
        self.death_rate = opt.death_rate

        super(StochasticTransformerEncoder, self).__init__(opt, dicts, positional_encoder, encoder_type)

        e_length = expected_length(self.layers, self.death_rate)
        print("Stochastic Encoder with %.2f expected layers" % e_length)

    def build_modules(self):
        self.layer_modules = nn.ModuleList()
        for depth in range(self.layers):
            # death rate grows linearly with depth
            layer_death_rate = (depth + 1.0) / self.layers * self.death_rate
            self.layer_modules.append(
                StochasticEncoderLayer(self.n_heads, self.model_size, self.dropout,
                                       self.inner_size, self.attn_dropout,
                                       death_rate=layer_death_rate))
class StochasticTransformerDecoder(TransformerDecoder):
    """Transformer decoder whose layers are dropped stochastically.

    Args:
        opt
        dicts
    """

    def __init__(self, opt, dicts, positional_encoder, attribute_embeddings=None, ignore_source=False):
        # must be set before the parent constructor runs, because the
        # parent calls build_modules() below
        self.death_rate = opt.death_rate

        super(StochasticTransformerDecoder, self).__init__(opt, dicts,
                                                           positional_encoder,
                                                           attribute_embeddings,
                                                           ignore_source)

        e_length = expected_length(self.layers, self.death_rate)
        print("Stochastic Decoder with %.2f expected layers" % e_length)

    def build_modules(self):
        self.layer_modules = nn.ModuleList()
        for depth in range(self.layers):
            # death rate grows linearly with depth
            layer_death_rate = (depth + 1) / self.layers * self.death_rate
            self.layer_modules.append(
                StochasticDecoderLayer(self.n_heads, self.model_size, self.dropout,
                                       self.inner_size, self.attn_dropout,
                                       death_rate=layer_death_rate))
| 3,473 | 32.085714 | 143 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/DynamicTransformer/Dlcl.py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: Wang Qiang
@contact: wangqiangneu@gmail.com
@desc: connection schema between layers
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class DynamicLinearCombination(nn.Module):
    """Dynamic Linear Combination of Layers (DLCL).

    Keeps a running history of layer outputs and feeds each new layer a
    learned linear combination of all preceding outputs:

        pre-norm:  x_{l+1} = \\sum_{k=0}^{l} W_k^{l+1} LN(y_k)
        post-norm: x_{l+1} = LN(\\sum_{k=0}^{l} W_k^{l+1} y_k)

    Usage: call ``clean()`` once per forward pass, ``push(layer_out)`` after
    each layer, and ``pop()`` to obtain the combined input for the next one.
    """
    def __init__(self, model_size, n_layers, is_encoder=True, include_sublayer=False):
        super(DynamicLinearCombination, self).__init__()
        # fixed configuration (not exposed as options)
        self.normalize_learned_weight = True
        self.normalized_weight = None
        self.weight_type = 'scalar'
        self.out_dropout = 0.0
        self.normalize_before = True
        self.dim = model_size
        # transformer encoder has 2 sub-layers, decoder has 3 sub-layers;
        # the +1 slot is for the embedding output
        if include_sublayer:
            layer_num = 1 + (2 * n_layers if is_encoder else 3 * n_layers)
        else:
            # the original wrote ``(n_layers if is_encoder else n_layers)``,
            # whose branches are identical -- simplified
            layer_num = 1 + n_layers
        # init weights and corresponding masks
        learnable = True
        # combine everything from the past (window size -1 = all layers)
        self.history_window_size = -1
        self.weight, self.weight_mask = self._init(layer_num, 'avg', self.weight_type,
                                                   -1, learnable)
        normalize_embed = False
        # init triangular layer norm: the embedding (index 0) stays
        # un-normalized (identity), every later layer gets its own LayerNorm
        if normalize_embed:
            self.layer_norms = nn.ModuleList([nn.LayerNorm(self.dim) for _ in range(layer_num)])
        else:
            self.layer_norms = nn.ModuleList([nn.Sequential()] + [nn.LayerNorm(self.dim) for _ in range(layer_num-1)])
        # running state, reset by clean()
        self.count = 0
        self.layers = []

    @staticmethod
    def _init_mask(n_layer, window_size):
        """Lower-triangular 0/1 mask: row i marks which past layers feed
        layer i (optionally limited to the last ``window_size`` layers)."""
        mask = np.zeros([n_layer, n_layer], dtype=np.float32)
        # all preceding layers
        if window_size == -1:
            for i in range(mask.shape[0]):
                mask[i, :(i+1)] = 1
        else:
            for i in range(mask.shape[0]):
                mask[i, max(0, i + 1 - window_size): (i+1)] = 1
        return torch.from_numpy(mask)

    @staticmethod
    def _init_weight(np_mask, dim=1, init_value='avg', learnable=True):
        """Build the (L x L x dim) weight Parameter from the mask.

        'avg' initializes each row to a uniform average over its visible
        layers; 'one' initializes every entry to 1.
        """
        np_weight = np.copy(np_mask)
        if init_value == 'avg':
            np_weight = np_weight / np.sum(np_weight, axis=1, keepdims=True)
        elif init_value == 'one':
            np_weight[:, :] = 1.
        else:
            raise ValueError('unknown init_value:{}'.format(init_value))
        weight_tensor = torch.from_numpy(np_weight).unsqueeze(2)
        if dim > 1:
            weight_tensor = weight_tensor.repeat(1, 1, dim)
        weight_tensor = torch.nn.Parameter(weight_tensor, requires_grad=learnable)
        return weight_tensor

    def _init(self, layer_num, init_value, weight_type, window_size=-1, learnable=True):
        """
        :param layer_num: total layers
        :param init_value: initial weight value
        :param weight_type: granularity of learned weights (scalar, scalar_X, vector)
        :param window_size: past windows size of layers
        :param learnable: if allow to learn weights
        :return:
            weight_tensor:
                1. L x L x 1 if weight type='scalar'
                2. L x L x X if weight type='scalar_X'
                3. L x L x H if weight type='vector'
            weight_mask: L x L, 0 means padding
        """
        # L x L
        mask_tensor = self._init_mask(layer_num, window_size)
        if weight_type == 'scalar':
            self.last_dim = 1
        elif weight_type == 'vector':
            self.last_dim = self.dim
        elif weight_type.startswith('scalar_'):
            n = int(weight_type.split('_')[1])
            assert self.dim % n == 0
            self.last_dim = n
        else:
            raise ValueError('unknown weight_type:{}'.format(weight_type))
        weight_tensor = self._init_weight(mask_tensor.numpy(), self.last_dim, init_value,
                                          learnable=learnable)
        return weight_tensor, mask_tensor

    def push(self, layer):
        """Append one layer output (T x B x H) to the history."""
        self.count += 1

        # first layer
        if self.count == 1:
            self.layers.append(self.layer_norms[0](layer))

            # compatible when running on CPU
            if layer.is_cuda and not self.weight_mask.is_cuda:
                self.weight_mask = self.weight_mask.cuda()

            # re-normalize the learned weights once per forward pass:
            # masked (future) entries are pushed to -inf before softmax
            if self.normalize_learned_weight:
                weight = self.weight.masked_fill((self.weight_mask == 0).unsqueeze(2), float('-inf'))
                self.normalized_weight = F.softmax(weight, dim=1)
            return

        # following layer
        if self.normalize_before:
            layer = self.layer_norms[self.count-1](layer)

        self.layers.append(layer)

    def _pick_weights(self):
        """Select the current row of (normalized) weights, shaped
        D x 1 x 1 x last_dim with D = number of stored layers."""
        weight = self.normalized_weight if self.normalize_learned_weight else self.weight
        weight = weight[self.count - 1, : self.count, :].view(-1, 1, 1, self.last_dim)
        return weight

    def pop(self):
        """Return the weighted combination of all stored layer outputs."""
        assert len(self.layers) > 0

        # D x 1 x 1 x [1, H/G, H]
        weights = self._pick_weights()
        # D x T x B x H
        layers = torch.stack(self.layers, 0)

        # linear combination
        if self.weight_type in ['scalar', 'vector']:
            ret = (layers * weights).sum(0)
        else:
            # grouped scalars: broadcast each weight over its feature group
            D, T, B, H = layers.size()
            layers = layers.view(D, T, B, -1, weights.size(-1))
            weights = weights.unsqueeze(3)
            ret = (layers * weights).sum(0).view(T, B, H)

        if self.normalize_before:
            if self.out_dropout > 0:
                return F.dropout(ret, p=self.out_dropout, training=self.training)
            else:
                return ret
        if self.out_dropout > 0:
            return F.dropout(self.layer_norms[self.count-1](ret), p=self.out_dropout, training=self.training)
        else:
            return self.layer_norms[self.count-1](ret)

    def clean(self):
        """Reset the history (call at the start of every forward pass)."""
        self.count = 0
        self.layers = []

    def forward(self):
        # nn.Module requires a forward; this module is driven through
        # push()/pop() instead. (The original file's trailing line was
        # corrupted by extraction residue; cleaned here.)
        pass
NMTGMinor | NMTGMinor-master/onmt/legacy/DynamicTransformer/Models.py | import math
import torch
import onmt
from onmt.legacy.DynamicTransformer.Dlcl import DynamicLinearCombination
from onmt.models.transformers import TransformerEncoder, TransformerDecoder
from onmt.modules.dropout import embedded_dropout
from torch.utils.checkpoint import checkpoint
class DlclTransformerEncoder(TransformerEncoder):
    """Transformer encoder with dense layer connections (DLCL).

    Each layer consumes a learned linear combination of all preceding layer
    outputs, maintained by ``DynamicLinearCombination``.
    """

    def __init__(self, opt, dicts, positional_encoder, encoder_type='text'):
        super().__init__(opt, dicts, positional_encoder, encoder_type)
        self.history = DynamicLinearCombination(self.model_size, self.layers, is_encoder=True)

    def forward(self, input, **kwargs):
        """
        Inputs Shapes:
            input: batch_size x len_src (text) or
                   batch_size x len_src x n_feat (audio)
        Returns:
            dict with 'context' (len_src x batch_size x d_model) and the
            source padding mask 'src_mask' (batch_size x 1 x len_src).
        """
        # clean layer history
        self.history.clean()

        # Embedding: batch_size x len_src x d_model
        if self.input_type == "text":
            mask_src = input.data.eq(onmt.constants.PAD).unsqueeze(1)  # batch_size x 1 x len_src
            emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
        else:
            # audio: feature channel 0 marks padding, the rest are features
            mask_src = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
            input = input.narrow(2, 1, input.size(2) - 1)
            emb = self.audio_trans(input.contiguous().view(-1, input.size(2))).view(input.size(0),
                                                                                   input.size(1), -1)

        # Scale the emb by sqrt(d_model)
        emb = emb * math.sqrt(self.model_size)

        # Adding positional encoding
        emb = self.time_transformer(emb)

        # Dropout
        emb = self.preprocess_layer(emb)

        # B x T x H -> T x B x H
        context = emb.transpose(0, 1).contiguous()

        self.history.push(context)

        for i, layer in enumerate(self.layer_modules):
            # next layer input = learned combination of all previous outputs
            context = self.history.pop()

            if len(self.layer_modules) - i <= onmt.constants.checkpointing and self.training:
                # Bug fix: the original called the undefined name
                # ``custom_layer`` here (NameError at runtime); checkpoint
                # accepts the module (a callable) directly.
                context = checkpoint(layer, context, mask_src)
            else:
                context = layer(context, mask_src)  # len_src x batch_size x d_model

            self.history.push(context)

        # From Google T2T: with pre-norm layers the combined output is a sum
        # of unnormalized layer outputs and can grow large, so normalize it
        # once more here.
        context = self.history.pop()
        context = self.postprocess_layer(context)

        output_dict = {'context': context, 'src_mask': mask_src}

        return output_dict
class DlclTransformerDecoder(TransformerDecoder):
    """Transformer decoder with dense layer connections (DLCL).

    Each layer consumes a learned linear combination of all preceding layer
    outputs, maintained by ``DynamicLinearCombination``.
    """

    def __init__(self, opt, dicts, positional_encoder, attribute_embeddings=None, ignore_source=False):
        super().__init__(opt, dicts, positional_encoder,
                         attribute_embeddings=attribute_embeddings, ignore_source=ignore_source)
        self.history = DynamicLinearCombination(self.model_size, self.layers, is_encoder=False)

    def forward(self, input, context, src, atbs=None, **kwargs):
        """
        Inputs Shapes:
            input: (Variable) batch_size x len_tgt
            context: (Variable) len_src x batch_size x d_model
            src: source input used only to derive the padding mask
            atbs: optional attribute indices (used when self.use_feature)
        Returns:
            dict with 'hidden' (len_tgt x batch_size x d_model) and
            'coverage' (attention of the last layer).
        """
        self.history.clean()

        # Embedding: batch_size x len_tgt x d_model
        emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
        if self.time == 'positional_encoding':
            emb = emb * math.sqrt(self.model_size)
        # adding positional encoding
        emb = self.time_transformer(emb)
        if isinstance(emb, tuple):
            emb = emb[0]
        emb = self.preprocess_layer(emb)

        if self.use_feature:
            atb_emb = self.attribute_embeddings(atbs).unsqueeze(1).repeat(1, emb.size(1))  # B x H -> B x T x H
            emb = torch.cat([emb, atb_emb], dim=-1)
            emb = torch.relu(self.feature_projector(emb))

        # Source padding mask. (The original computed this identical block
        # twice in a row; the duplicate has been removed.)
        if context is not None:
            if self.encoder_type == "audio":
                mask_src = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
            else:
                mask_src = src.data.eq(onmt.constants.PAD).unsqueeze(1)
        else:
            mask_src = None

        # Target mask: padding plus the causal (future-blinding) mask.
        len_tgt = input.size(1)
        mask_tgt = input.data.eq(onmt.constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
        mask_tgt = torch.gt(mask_tgt, 0)

        output = emb.transpose(0, 1).contiguous()

        self.history.push(output)

        for i, layer in enumerate(self.layer_modules):
            # next layer input = learned combination of all previous outputs
            output = self.history.pop()

            if len(self.layer_modules) - i <= onmt.constants.checkpointing and self.training:
                # Bug fix: the original called the undefined name
                # ``custom_layer`` here (NameError at runtime); checkpoint
                # accepts the module (a callable) directly.
                output, coverage = checkpoint(layer, output, context, mask_tgt, mask_src)
            else:
                output, coverage = layer(output, context, mask_tgt, mask_src)

            # write into memory
            self.history.push(output)

        # From Google T2T: normalize the combined (summed, unnormalized)
        # output once more before returning.
        output = self.history.pop()
        output = self.postprocess_layer(output)

        output_dict = {'hidden': output, 'coverage': coverage}

        return output_dict

    def step(self, input, decoder_state):
        """Decode one step during inference.

        Inputs Shapes:
            input: (Variable) batch_size x len_tgt (full prefix so far)
            decoder_state: holds the encoder context, per-layer attention
                buffers and the running input sequence
        Returns:
            (output, coverage) for the newest position only
            (output: 1 x batch_size x d_model).
        """
        self.history.clean()

        context = decoder_state.context
        buffers = decoder_state.attention_buffers
        src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
        atbs = decoder_state.tgt_atb

        # extend the stored input sequence with the newest token
        if decoder_state.input_seq is None:
            decoder_state.input_seq = input
        else:
            # concatenate the last input to the previous input sequence
            decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
        input = decoder_state.input_seq.transpose(0, 1)
        # only the newest position is embedded and decoded this step
        input_ = input[:, -1].unsqueeze(1)

        # Embedding: batch_size x 1 x d_model
        emb = self.word_lut(input_)

        # adding positional encoding
        if self.time == 'positional_encoding':
            emb = emb * math.sqrt(self.model_size)
            emb = self.time_transformer(emb, t=input.size(1))
        else:
            # other positional schemes (e.g. recurrent time) are unsupported
            raise NotImplementedError

        if isinstance(emb, tuple):
            emb = emb[0]

        # emb should be batch_size x 1 x dim
        if self.use_feature:
            atb_emb = self.attribute_embeddings(atbs).unsqueeze(1).expand_as(emb)
            emb = torch.cat([emb, atb_emb], dim=-1)
            emb = torch.relu(self.feature_projector(emb))

        # Preprocess layer: adding dropout
        emb = self.preprocess_layer(emb)
        emb = emb.transpose(0, 1)

        # source padding mask: batch_size x 1 x len_src
        if context is not None:
            if self.encoder_type == "audio" and src.data.dim() == 3:
                mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
            else:
                mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
        else:
            mask_src = None

        # target mask restricted to the newest position
        len_tgt = input.size(1)
        mask_tgt = input.data.eq(onmt.constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
        mask_tgt = torch.gt(mask_tgt, 0)
        mask_tgt = mask_tgt[:, -1, :].unsqueeze(1)

        output = emb.contiguous()

        self.history.push(output)

        for i, layer in enumerate(self.layer_modules):
            output = self.history.pop()
            buffer = buffers[i] if i in buffers else None
            assert output.size(0) == 1
            output, coverage, buffer = layer.step(output, context, mask_tgt, mask_src, buffer=buffer)
            decoder_state.update_attention_buffer(buffer, i)
            self.history.push(output)

        # normalize the combined output once more (see forward())
        output = self.history.pop()
        output = self.postprocess_layer(output)

        return output, coverage
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.