Dataset columns:

column            type           min     max
repo              stringlengths  1       99
file              stringlengths  13      215
code              stringlengths  12      59.2M
file_length       int64          12      59.2M
avg_line_length   float64        3.82    1.48M
max_line_length   int64          12      2.51M
extension_type    stringclasses  1 value
RegularizedBN
RegularizedBN-main/fairseq/modules/norm/mask_anchornorm.py
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : MaskBatchNorm.py
# Distributed under MIT License.

import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import numpy as np
from scipy import io

__all__ = ['MaskAnchorNorm']


def min_len(tensor):
    # tensor: [T, B, C]
    tokens = (tensor != 0).all(dim=-1)
    length = tokens.sum(dim=0).long()
    return length.min()


class MaskAnchorNorm(nn.Module):
    """
    An implementation of masked batch normalization, used for testing the numerical stability.
    """
    __count = 0

    def update_cnt(self):
        MaskAnchorNorm.__count += 1

    def __init__(self, num_features, eps=1e-5, momentum=0.05, affine=True,
                 position=0, num=1, with_seq=False, prefix='None'):
        super().__init__()
        self.id = self.__count
        self.prefix = prefix
        self.position = position
        self.num = num
        #print(self.id)
        self.update_cnt()
        self.num_features = num_features
        self.eps = eps
        self.affine = affine
        self.momentum = momentum
        if self.affine:
            self.weight = Parameter(torch.Tensor(num_features))
            self.bias = Parameter(torch.Tensor(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))
        self.reset_parameters()

    def reset_parameters(self):
        self.reset_running_stats()
        if self.affine:
            init.ones_(self.weight)
            init.zeros_(self.bias)

    def reset_running_stats(self):
        self.running_mean.zero_()
        self.running_var.fill_(1)

    def extra_repr(self):
        return '{num_features}, eps={eps}, affine={affine}'.format(**self.__dict__)

    def forward(self, input, pad_mask=None, is_encoder=False):
        """
        input: T x B x C -> B x C x T
             : B x C x T -> T x B x C
        pad_mask: B x T (padding is True)
        """
        T, B, C = input.shape
        num = min(min_len(input) - self.position, self.num)
        #input = input.contiguous()
        if self.training:
            data = input[self.position:self.position + num, :, :]
            var, mean = torch.var_mean(data, dim=(0, 1))
            with torch.no_grad():
                self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
                self.running_var = (1 - self.momentum) * self.running_var + self.momentum * var.data
        else:
            mean, var = self.running_mean, self.running_var
        input = (input - mean) / torch.sqrt(var.clamp(self.eps))
        if self.affine:
            output = input * self.weight + self.bias
        else:
            output = input
        return output
2,951
32.168539
103
py
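Usage sketch for the MaskAnchorNorm module above (illustrative, not part of the dataset row): the import path mirrors the file path listed in the record, and the tensor sizes are made-up assumptions. It shows the T x B x C calling convention and the train/eval difference (batch statistics from the first `num` positions versus the running buffers).

import torch
from fairseq.modules.norm.mask_anchornorm import MaskAnchorNorm

norm = MaskAnchorNorm(num_features=8, position=0, num=2)
x = torch.randn(5, 3, 8)      # T x B x C
norm.train()
y = norm(x)                   # statistics from the first `num` positions
norm.eval()
y_eval = norm(x)              # statistics from the running buffers
print(y.shape, y_eval.shape)  # torch.Size([5, 3, 8]) twice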
RegularizedBN
RegularizedBN-main/fairseq/modules/norm/mask_groupnorm.py
#! /usr/bin/env python3 # -*- coding: utf-8 -*- # File : groupnorm.py # Distributed under MIT License. import torch import torch.nn as nn import torch.nn.init as init import torch.nn.functional as F def tile(a, repeats, dim): """ Substitute for numpy's repeat function. Taken from https://discuss.pytorch.org/t/how-to-tile-a-tensor/13853/2 torch.repeat([1,2,3], 2) --> [1, 2, 3, 1, 2, 3] np.repeat([1,2,3], repeats=2, axis=0) --> [1, 1, 2, 2, 3, 3] :param a: tensor :param repeats: number of repeats :param dim: dimension where to repeat :return: tensor with repitions """ init_dim = a.size(dim) repeat_idx = [1] * a.dim() repeat_idx[dim] = repeats a = a.repeat(*(repeat_idx)) if a.is_cuda: # use cuda-device if input was on cuda device already order_index = torch.cuda.LongTensor( torch.cat([init_dim * torch.arange(repeats, device=a.device) + i for i in range(init_dim)])) else: order_index = torch.LongTensor(torch.cat([init_dim * torch.arange(repeats) + i for i in range(init_dim)])) return torch.index_select(a, dim, order_index) class GroupNorm(nn.Module): r"""Applies Group Normalization over a mini-batch of inputs as described in the paper `Group Normalization`_ . .. math:: y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta The input channels are separated into :attr:`num_groups` groups, each containing ``num_channels / num_groups`` channels. The mean and standard-deviation are calculated separately over the each group. :math:`\gamma` and :math:`\beta` are learnable per-channel affine transform parameter vectors of size :attr:`num_channels` if :attr:`affine` is ``True``. This layer uses statistics computed from input data in both training and evaluation modes. Args: num_groups (int): number of groups to separate the channels into num_channels (int): number of channels expected in input eps: a value added to the denominator for numerical stability. Default: 1e-5 affine: a boolean value that when set to ``True``, this module has learnable per-channel affine parameters initialized to ones (for weights) and zeros (for biases). Default: ``True``. Shape: - Input: :math:`(N, C, *)` where :math:`C=\text{num\_channels}` - Output: :math:`(N, C, *)` (same shape as input) Examples:: >>> input = torch.randn(20, 6, 10, 10) >>> # Separate 6 channels into 3 groups >>> m = nn.GroupNorm(3, 6) >>> # Separate 6 channels into 6 groups (equivalent with InstanceNorm) >>> m = nn.GroupNorm(6, 6) >>> # Put all 6 channels into a single group (equivalent with LayerNorm) >>> m = nn.GroupNorm(1, 6) >>> # Activating the module >>> output = m(input) .. _`Group Normalization`: https://arxiv.org/abs/1803.08494 """ __constants__ = ['num_groups', 'num_channels', 'eps', 'affine', 'weight', 'bias'] def __init__(self, num_channels, num_groups=2, eps=1e-5, affine=True, subtract_type="mean", robust_mean=0, robust_std=0): super(GroupNorm, self).__init__() self.num_groups = num_groups self.num_channels = num_channels self.group_feature = num_channels // num_groups self.subtract_type = subtract_type self.robust_mean = robust_mean self.robust_std = robust_std assert subtract_type in ['mean','none','median'], "wrong subtract_type!" 
self.eps = eps self.affine = affine if self.affine: self.weight = nn.Parameter(torch.Tensor(num_channels)) self.bias = nn.Parameter(torch.Tensor(num_channels)) else: self.register_parameter('weight', None) self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): if self.affine: init.ones_(self.weight) #init.zeros_(self.weight) init.zeros_(self.bias) def forward(self, input, pad_mask=None, is_encoder=False): # input: only reudce over the C dim. shaped_input = (len(input.shape) == 2) if shaped_input: input = input.unsqueeze(0) T, B, C = input.shape # Permute the mask_input to (B, T, C) # mask_input = input.transpose(0, 1) # # Compute the mean, var for LN, size to be BxTx1 -> BxCxT # # Split the mask_input into group # gn_input = mask_input.view(B, T, self.num_groups, self.group_feature) # gn_input = gn_input.permute(1, 2, 3, 0).contiguous().view(T, self.num_groups, self.group_feature * B) # # TxGx1 -> TxC -> BxTxC -> BxCxT # mean_gn = tile(gn_input.mean(-1, keepdim=True).squeeze(-1), self.group_feature, -1).expand_as(mask_input).transpose(1, 2) # var_gn = tile(gn_input.var(-1, keepdim=True).squeeze(-1), self.group_feature, -1).expand_as(mask_input).transpose(1, 2) # # # Resize the input to (B, C, -1). # input = input.permute(1, 2, 0).contiguous() # input_shape = input.size() # input = input.view(input.size(0), self.num_channels, -1) # # input = (input - mean_gn) / (var_gn + self.eps).sqrt() # input = input * (self.weight).unsqueeze(-1) + (self.bias).unsqueeze(-1) # input = input.view(B, C, T) # input = input.permute(2, 0, 1).contiguous() # return input input = input.contiguous().view(-1, self.group_feature) ''' input = input.contiguous().view(-1, self.group_num, self.group_feature) std, subtract_term = torch.std_mean(input,dim=2,unbiased=False,keepdim=True) if self.subtract_type=='median': subtract_term, _ = torch.median(input,dim=2,keepdim=True) #var_of_mean = torch.var(subtract_term,dim=1,unbiased=False,keepdim=True) if self.robust_mean==1: subtract_term, _ = torch.median(subtract_term,dim=1,keepdim=True) #robust mean std = torch.std(input,dim=(1,2),unbiased=False,keepdim=True) #if self.robust_std==1: # std, _ = torch.median(std,dim=1,keepdim=True) #robust std #可能还少了一项:条件均值的方差 #std = (std**2+var_of_mean**2)**0.5 if self.subtract_type!='none': input = input-subtract_term input = input/std #有问题! ''' mean = input.mean(dim=1,keepdim=True) #var = ((input-mean) ** 2).mean(dim=2,keepdim=True)+self.eps var = input.var(dim=1,keepdim=True)+self.eps #s = torch.sqrt(var) #inv_std = var**-0.5 ##inv_std = 1/torch.sqrt(var) #output = (input - mean) * inv_std input = (input - mean) / torch.sqrt(var) ##暂时median后接mean的情况没考虑 input = input.contiguous().view(-1, C) if self.affine: input = input * (self.weight).unsqueeze(0) + (self.bias).unsqueeze(0) #input = F.group_norm(input, self.num_groups, self.weight, self.bias, self.eps) input = input.contiguous().view(T, B, C) if shaped_input: input = input.squeeze(0) return input def extra_repr(self): return '{num_groups}, {num_channels}, eps={eps}, ' \ 'affine={affine}, subtract_type={subtract_type}'.format(**self.__dict__)
7,391
43
131
py
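A small usage sketch for the GroupNorm variant above (illustrative assumptions: import path taken from the record, toy shapes). Channels are split into groups and each group is normalized by its own mean and variance.

import torch
from fairseq.modules.norm.mask_groupnorm import GroupNorm

gn = GroupNorm(num_channels=8, num_groups=4)   # 2 channels per group
x = torch.randn(5, 3, 8)                       # T x B x C
y = gn(x)
print(y.shape)                                 # torch.Size([5, 3, 8])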
RegularizedBN
RegularizedBN-main/fairseq/modules/norm/mask_layernorm3d.py
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : MaskBatchNorm.py
# Distributed under MIT License.

import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn.modules._functions import SyncBatchNorm as sync_batch_norm
import numpy as np
from scipy import io

__all__ = ['MaskLayerNorm3d']


class MaskLayerNorm3d(nn.Module):
    """
    An implementation of masked batch normalization, used for testing the numerical stability.
    """
    __count = 0

    def update_cnt(self):
        MaskLayerNorm3d.__count += 1

    def __init__(self, num_features, eps=1e-5, affine=True, with_seq=False, prefix='None'):
        super().__init__()
        self.id = self.__count
        self.prefix = prefix
        self.update_cnt()
        self.num_features = num_features
        self.eps = eps
        self.affine = affine
        if self.affine:
            self.weight = Parameter(torch.Tensor(num_features))
            self.bias = Parameter(torch.Tensor(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        if self.affine:
            init.ones_(self.weight)
            init.zeros_(self.bias)

    def extra_repr(self):
        return '{num_features}, eps={eps}, affine={affine}'.format(**self.__dict__)

    def forward(self, input, pad_mask=None):
        """
        input: T x B x C -> B x C x T
             : B x C x T -> T x B x C
        pad_mask: B x T (padding is True)
        """
        #print(input.shape)  # torch.Size([26, 2, 128])
        T, B, C = input.shape
        #input = input.contiguous()
        # construct the mask_input, size to be (BxL) x C: L is the real length here
        # Compute the statistics
        mean = input.mean(dim=2, keepdim=True)
        #var = ((input - mean) ** 2).mean(dim=2, keepdim=True) + self.eps
        var = input.var(dim=2, keepdim=True) + self.eps
        output = (input - mean) / torch.sqrt(var)
        if self.affine:
            output = output * self.weight + self.bias
        return output
2,211
32.515152
83
py
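Usage sketch for MaskLayerNorm3d (illustrative shapes; import path taken from the record). Each position is normalized over the channel dimension, so with the default affine parameters every position comes out approximately zero-mean.

import torch
from fairseq.modules.norm.mask_layernorm3d import MaskLayerNorm3d

ln = MaskLayerNorm3d(num_features=8)
x = torch.randn(5, 3, 8)            # T x B x C
y = ln(x)
print(y.mean(dim=-1).abs().max())   # close to 0: each position is centered over C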
RegularizedBN
RegularizedBN-main/fairseq/modules/norm/mask_groupscale.py
#! /usr/bin/env python3 # -*- coding: utf-8 -*- # File : MaskPowerNorm.py # Distributed under MIT License. import torch import torch.nn as nn import torch.nn.init as init import torch.nn.functional as F __all__ = ['MaskGruopScale'] def _sum_ft(tensor): """sum over the first and last dimention""" return tensor.sum(dim=0).sum(dim=-1) class GroupScaling1D(nn.Module): r"""Scales inputs by the second moment for the entire layer. """ def __init__(self, eps=1e-5, group_num=4): super(GroupScaling1D, self).__init__() self.eps = eps self.group_num = group_num def extra_repr(self): return f'eps={self.eps}, group={self.group_num}' def forward(self, input): # calculate second moment # different group use different mean T, B, C = input.shape[0], input.shape[1], input.shape[2] Cg = C // self.group_num gn_input = input.contiguous().reshape(T, B, self.group_num, Cg) moment2 = torch.repeat_interleave( torch.mean(gn_input * gn_input, dim=3, keepdim=True), \ repeats=Cg, dim=-1).contiguous().reshape(T, B, C) # divide out second moment return input / torch.sqrt(moment2 + self.eps) def _unsqueeze_ft(tensor): """add new dimensions at the front and the tail""" return tensor.unsqueeze(0).unsqueeze(-1) class PowerFunction(torch.autograd.Function): @staticmethod def forward(ctx, x, weight, bias, running_phi, eps, afwd, abkw, ema_gz, \ debug, warmup_iters, current_iter, mask_x): ctx.eps = eps ctx.debug = debug current_iter = current_iter.item() ctx.current_iter = current_iter ctx.warmup_iters = warmup_iters ctx.abkw = abkw rmax = 1 N, C, H, W = x.size() x2 = (mask_x * mask_x).mean(dim=0) var = x2.reshape(1, C, 1, 1) if current_iter <= warmup_iters: z = x /(var + eps).sqrt() else: z = x /(running_phi + eps).sqrt() y = z ctx.save_for_backward(z, var, weight, ema_gz) if current_iter < warmup_iters: running_phi.copy_(running_phi * (current_iter-1)/current_iter + var.mean(dim=0, keepdim=True)/current_iter) running_phi.copy_(afwd*running_phi + (1-afwd)*var.mean(dim=0, keepdim=True)) y = weight.reshape(1,C,1,1) * y + bias.reshape(1,C,1,1) return y @staticmethod def backward(ctx, grad_output): eps = ctx.eps debug = ctx.debug current_iter = ctx.current_iter warmup_iters = ctx.warmup_iters abkw = ctx.abkw N, C, H, W = grad_output.size() z, var, weight, ema_gz = ctx.saved_variables y = z g = grad_output * weight.reshape(1, C, 1, 1) g = g * 1 gz = (g * z).mean(dim=3).mean(dim=2).mean(dim=0) approx_grad_g = (g - (1 - abkw) * ema_gz * z) ema_gz.add_((approx_grad_g * z).mean(dim=3, keepdim=True).mean(dim=2, keepdim=True).mean(dim=0, keepdim=True)) gx = 1. / torch.sqrt(var + eps) * approx_grad_g return gx, (grad_output * y).sum(dim=3).sum(dim=2).sum(dim=0), grad_output.sum(dim=3).sum(dim=2).sum(dim=0), \ None, None, None, None, None, None, None, None, None, None class MaskGroupScale(nn.Module): """ An implementation of masked batch normalization, used for testing the numerical stability. 
""" def __init__(self, num_features, eps=1e-5, alpha_fwd=0.9, alpha_bkw=0.9, \ affine=True, warmup_iters=4000, group_num=1): super().__init__() self.num_features = num_features self.eps = eps self.affine = affine self.register_parameter('weight', nn.Parameter(torch.ones(num_features))) self.register_parameter('bias', nn.Parameter(torch.zeros(num_features))) self.register_buffer('running_phi', torch.ones(1,num_features,1,1)) self.register_buffer('ema_gz', torch.zeros(1,num_features,1,1)) self.register_buffer('iters', torch.zeros(1).type(torch.LongTensor)) self.afwd = alpha_fwd self.abkw = alpha_bkw self.eps = eps self.debug = False self.warmup_iters = warmup_iters self.gp = GroupScaling1D(group_num=group_num) self.group_num = group_num def extra_repr(self): return '{num_features}, eps={eps}, alpha_fwd={afwd}, alpha_bkw={abkw}, ' \ 'affine={affine}, warmup={warmup_iters}, group_num={group_num}'.format(**self.__dict__) def forward(self, input, pad_mask=None, is_encoder=False): """ input: T x B x C -> B x C x T : B x C x T -> T x B x C pad_mask: B x T (padding is True) """ shaped_input = (len(input.shape) == 2) if shaped_input: input = input.unsqueeze(0) T, B, C = input.shape input = self.gp(input) return input # construct the mask_input, size to be (BxL) x C: L is the real length here if pad_mask is None: mask_input = input.clone() else: # Transpose the bn_mask (B x T -> T x B) bn_mask = ~pad_mask bn_mask = bn_mask.transpose(0, 1) if pad_mask is not None: pad_size = (~bn_mask).sum() mask_input = input[bn_mask, :] else: mask_input = input.clone() mask_input = mask_input.reshape(-1, self.num_features) input = input.permute(1, 2, 0).contiguous() input_shape = input.size() input = input.reshape(input.size(0), self.num_features, -1) input = input.unsqueeze(-1) if self.training: self.iters.copy_(self.iters + 1) output = PowerFunction.apply(input, self.weight, self.bias, self.running_phi, self.eps, \ self.afwd, self.abkw, self.ema_gz, self.debug, self.warmup_iters, self.iters, mask_input) else: N, C, H, W = input.size() var = self.running_phi output = input / (var + self.eps).sqrt() output = self.weight.reshape(1,C,1,1) * output + self.bias.reshape(1,C,1,1) output = output.reshape(input_shape) output = output.permute(2, 0, 1).contiguous() # Reshape it. if shaped_input: output = output.squeeze(0) return output
6,418
34.076503
119
py
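In the file above, MaskGroupScale.forward simply applies GroupScaling1D and returns, so the sketch below (illustrative shapes; import path from the record) exercises GroupScaling1D directly: each group of channels is divided by the square root of its own second moment.

import torch
from fairseq.modules.norm.mask_groupscale import GroupScaling1D

gs = GroupScaling1D(group_num=4)
x = torch.randn(5, 3, 8)     # T x B x C, with C divisible by group_num
y = gs(x)                    # each group of C/4 channels is scaled by its RMS
print(y.shape)               # torch.Size([5, 3, 8])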
RegularizedBN
RegularizedBN-main/fairseq/modules/norm/mask_identity.py
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : MaskBatchNorm.py
# Distributed under MIT License.

import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F

__all__ = ['MaskIdentityNorm']


class MaskIdentityNorm(nn.Module):
    """
    A mask-aware identity (no-op) normalization layer.
    """

    def __init__(self, num_features, affine=False):
        super().__init__()
        self.num_features = num_features
        self.affine = affine

    def extra_repr(self):
        return '{num_features}, affine={affine}'.format(**self.__dict__)

    def forward(self, input, pad_mask=None, is_encoder=False):
        """
        input: T x B x C -> B x C x T
             : B x C x T -> T x B x C
        pad_mask: B x T (padding is True)
        """
        return input
790
23.71875
62
py
RegularizedBN
RegularizedBN-main/fairseq/modules/norm/mask_batchnorm3d.py
#! /usr/bin/env python3 # -*- coding: utf-8 -*- # File : MaskBatchNorm.py # Distributed under MIT License. import torch import torch.nn as nn import torch.nn.init as init import torch.nn.functional as F from torch.nn.parameter import Parameter from torch.nn.modules._functions import SyncBatchNorm as sync_batch_norm import numpy as np from scipy import io __all__ = ['MaskBatchNorm3d'] class MaskBatchNorm3d(nn.Module): """ An implementation of masked batch normalization, used for testing the numerical stability. """ __count = 0 def update_cnt(self): MaskBatchNorm3d.__count += 1 def __init__(self, num_features, eps=1e-5, momentum=0.05, \ affine=True, track_running_stats=True, sync_bn=True, process_group=None, \ with_seq = True, prefix = 'None', weight_affine=0, penalty_mean=0, penalty_var=0, \ penalty_type='diff', learn_alpha=False, alpha=1, normalize=True): super().__init__() self.id = self.__count self.prefix = prefix #print(self.id) self.update_cnt() self.interval = 1100 #standard: 1100 #self.save_interval = 20 self.batchnum = 0 self.num_features = num_features self.eps = eps self.momentum = momentum #default0.05 self.affine = affine self.track_running_stats = track_running_stats self.maxlen = 600 self.with_seq = with_seq self.learn_alpha = learn_alpha self.normalize = normalize if self.learn_alpha: self.alpha = Parameter(torch.Tensor(1)) nn.init.constant_(self.alpha, alpha) self.weight_affine = weight_affine self.penalty_mean = penalty_mean self.penalty_var = penalty_var self.penalty_type = penalty_type assert self.penalty_type in ['diff','diff2','reldiff','mag','reldiffnorm','reldiffcosine','symkl'], "wrong penalty type for BN!" #self.running_mean_list = [] #self.running_var_list = [] #self.batch_mean_list = [] #self.batch_var_list = [] self.batch_mean_norm = [] self.batch_var_norm = [] self.batch_sigma_norm = [] self.running_mean_norm = [] self.running_var_norm = [] self.running_sigma_norm = [] self.diff_mean_norm = [] self.diff_var_norm = [] self.diff_sigma_norm = [] self.mean_tid_list = [] self.var_tid_list = [] self.grad_mean_list = [] self.grad_proj_list = [] self.exp_var_list = [] self.var_exp_list = [] self.shape_list = [] self.file = 'statistics/{}/bn_{}'.format(self.prefix,self.id) if self.affine: self.weight = Parameter(torch.Tensor(num_features)) self.bias = Parameter(torch.Tensor(num_features)) else: self.register_parameter('weight', None) self.register_parameter('bias', None) if self.track_running_stats: self.register_buffer('running_mean', torch.zeros(num_features)) #self.register_buffer('momentum_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) #self.register_buffer('momentum_var', torch.ones(num_features)) self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long)) ''' self.register_buffer('running_mean', torch.zeros(self.maxlen,num_features)) self.register_buffer('running_var', torch.ones(self.maxlen,num_features)) self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long)) ''' else: self.register_parameter('running_mean', None) self.register_parameter('running_var', None) self.register_parameter('num_batches_tracked', None) self.sync_bn = sync_bn # gpu_size is set through DistributedDataParallel initialization. 
This is to ensure that SyncBatchNorm is used # under supported condition (single GPU per process) self.process_group = process_group self.ddp_gpu_size = 4 self.reset_parameters() def reset_running_stats(self): if self.track_running_stats: self.running_mean.zero_() self.running_var.fill_(1) self.num_batches_tracked.zero_() def reset_parameters(self): self.reset_running_stats() if self.affine: init.ones_(self.weight) init.zeros_(self.bias) def extra_repr(self): return '{num_features}, eps={eps}, momentum={momentum}, affine={affine}, ' \ 'track_running_stats={track_running_stats}'.format(**self.__dict__) def record_forward(self): diff_mean_data = (self.running_mean-self.batch_mean).data diff_var_data = (self.running_var-self.batch_var).data self.running_sigma, self.batch_sigma = torch.sqrt(self.running_var.data), torch.sqrt(self.batch_var.data) var_tid = (self.running_sigma/self.batch_sigma-1).abs().mean() mean_tid = (diff_mean_data/self.batch_sigma).abs().mean() self.mean_tid_list.append(mean_tid.cpu().numpy().item()) self.var_tid_list.append(var_tid.cpu().numpy().item()) diff_sigma_data = (self.running_sigma-self.batch_sigma).data self.diff_mean_norm.append(diff_mean_data.norm().cpu().numpy().item()) self.diff_var_norm.append(diff_var_data.norm().cpu().numpy().item()) self.diff_sigma_norm.append(diff_sigma_data.norm().cpu().numpy().item()) self.batch_mean_norm.append(self.batch_mean.norm().cpu().numpy().item()) self.batch_var_norm.append(self.batch_var.norm().cpu().numpy().item()) self.batch_sigma_norm.append(self.batch_sigma.norm().cpu().numpy().item()) self.running_mean_norm.append(self.running_mean.norm().cpu().numpy().item()) self.running_var_norm.append(self.running_var.norm().cpu().numpy().item()) self.running_sigma_norm.append(self.running_sigma.norm().cpu().numpy().item()) self.exp_var_list.append(self.exp_var.norm().cpu().numpy().item()) self.var_exp_list.append(self.var_exp.norm().cpu().numpy().item()) self.shape_list.append(self.shape[:2]) #print(self.batch_var[:5]) #print(self.exp_var[:5]) #print(self.var_exp[:5]) #exit() if self.batchnum%self.interval==0: savefile = "{}_forward_{}.mat".format(self.file,self.batchnum//self.interval) d = {} diff_mean = np.array(self.diff_mean_norm) diff_var = np.array(self.diff_var_norm) diff_sigma = np.array(self.diff_sigma_norm) batch_mean = np.array(self.batch_mean_norm) batch_var = np.array(self.batch_var_norm) batch_sigma = np.array(self.batch_sigma_norm) running_mean = np.array(self.running_mean_norm) running_var = np.array(self.running_var_norm) running_sigma = np.array(self.running_sigma_norm) mean_tid = np.array(self.mean_tid_list) var_tid = np.array(self.var_tid_list) exp_var = np.array(self.exp_var_list) var_exp = np.array(self.var_exp_list) shape = np.array(self.shape_list) d['diff_mean'] = diff_mean d['diff_var'] = diff_var d['diff_sigma'] = diff_sigma d['running_mean'] = running_mean d['running_var'] = running_var d['running_sigma'] = running_sigma d['batch_mean'] = batch_mean d['batch_var'] = batch_var d['batch_sigma'] = batch_sigma d['mean_tid'] = mean_tid d['var_tid'] = var_tid d['exp_var'] = exp_var d['var_exp'] = var_exp d['shape'] = shape io.savemat(savefile, d) self.batch_mean_norm = [] self.batch_var_norm = [] self.batch_sigma_norm = [] self.running_mean_norm = [] self.running_var_norm = [] self.running_sigma_norm = [] self.diff_mean_norm = [] self.diff_var_norm = [] self.diff_sigma_norm = [] self.mean_tid_list = [] self.var_tid_list = [] self.exp_var_list = [] self.var_exp_list = [] self.shape_list = [] def 
backward_hook(self,grad): #B, T, C grad_mean = (grad*self.mask).sum(dim=(0,1))/self.sum_size**0.5 grad_proj = ((grad*self.mask)*self.x).sum(dim=(0,1))/self.sum_size**0.5 #self.grad_mean_list.append(grad_mean.data.cpu().reshape([1,-1])) #self.grad_proj_list.append(grad_proj.data.cpu().reshape([1,-1])) self.grad_mean_list.append(grad_mean.norm().cpu().numpy().item()) self.grad_proj_list.append(grad_proj.norm().cpu().numpy().item()) #print(grad_mean.shape,grad_proj.shape);exit() if self.batchnum%self.interval==0: savefile = "{}_backward_{}.mat".format(self.file,self.batchnum//self.interval) d = {} #grad_mean_arr = torch.cat(self.grad_mean_list,dim=0) #grad_proj_arr = torch.cat(self.grad_proj_list,dim=0) d['grad_mean'] = np.array(self.grad_mean_list) d['grad_proj'] = np.array(self.grad_proj_list) from scipy import io io.savemat(savefile, d) self.grad_mean_list = [] self.grad_proj_list = [] def loss(self): loss = 0 assert self.training==True, "wrongly adding BN inconsistent loss!" if self.penalty_mean==0 and self.penalty_var==0: return loss if self.penalty_type=='diff': loss = self.loss_diff(loss) if self.penalty_type=='diff2': loss = self.loss_diff2(loss) if self.penalty_type=='reldiff': loss = self.loss_rel_diff(loss) if self.penalty_type=='mag': loss = self.loss_magnitude(loss) if self.penalty_type=='reldiffnorm': loss = self.loss_rel_diff_norm(loss) if self.penalty_type=='reldiffcosine': loss = self.loss_rel_diff_cosine(loss) if self.penalty_type=='symkl': loss = self.sym_kl_loss(loss) return loss def loss_diff(self, loss): if self.weight_affine: loss += self.penalty_mean*(((self.running_mean-self.batch_mean)**2)*self.weight.detach()).sum() loss += self.penalty_var*((torch.abs(self.running_var-self.batch_var))*self.weight.detach()).sum() #print(loss) #loss: 初始十几,训了一小会,零点几,1,2的居多,偶尔有9 else: loss += self.penalty_mean*((self.running_mean-self.batch_mean)**2).sum() loss += self.penalty_var*(torch.abs(self.running_var-self.batch_var)).sum() return loss def loss_diff2(self, loss): self.running_sigma, self.batch_sigma = torch.sqrt(self.running_var), torch.sqrt(self.batch_var) loss += self.penalty_mean*((self.running_mean-self.batch_mean)**2).sum() loss += self.penalty_var*((self.running_sigma-self.batch_sigma)**2).sum() return loss def loss_rel_diff(self, loss): self.running_sigma, self.batch_sigma = torch.sqrt(self.running_var), torch.sqrt(self.batch_var) loss += self.penalty_mean*(((self.running_mean-self.batch_mean)/self.running_sigma)**2).sum() loss += self.penalty_var*((1-self.batch_sigma/self.running_sigma)**2).sum() return loss def loss_rel_diff_norm(self, loss): loss += self.penalty_mean*(((self.running_mean-self.batch_mean)/self.running_mean.norm())**2).sum() loss += self.penalty_var*(torch.abs(self.running_var-self.batch_var)/self.running_var.norm(p=1)).sum() return loss def loss_rel_diff_cosine(self, loss): loss -= self.penalty_mean*(self.running_mean*self.batch_mean).sum()/self.batch_mean.norm()/self.running_mean.norm() loss -= self.penalty_var*(self.running_var*self.batch_var).sum()/self.batch_var.norm()/self.running_var.norm() #loss -= self.penalty_var*torch.sqrt((self.running_var*self.batch_var)).sum()/torch.sqrt(self.batch_var.sum()*self.running_var.sum()) return loss def loss_magnitude(self, loss): loss += self.penalty_mean*((self.batch_mean)**2).sum() loss += self.penalty_var*(torch.abs(self.batch_var)).sum() return loss def sym_kl_loss(self,loss): item1 = self.running_var/(self.batch_var+self.eps)+self.batch_var/(self.running_var+self.eps) item2 = 
((self.running_mean-self.batch_mean)**2)*(1/(self.batch_var+self.eps)+1/(self.running_var+self.eps)) loss += self.penalty_mean*(item1.sum()+item2.sum()) return loss def forward(self, input, pad_mask=None, is_encoder=False, update_run=True): """ input: T x B x C -> B x C x T : B x C x T -> T x B x C pad_mask: B x T (padding is True) """ T, B, C = input.shape #if self.id==0: # print(input.shape) input = input.contiguous() #print(input.shape) #21, 192, 512 input = input.permute(1,0,2) #T,B,C-->B,T,C #print(pad_mask.shape,input.shape)#torch.Size([192, 21]) torch.Size([192, 21, 512]) #print(~pad_mask);exit()#true: 有mask, false:无mask if pad_mask is not None: mask = 1-pad_mask.unsqueeze(dim=-1).type_as(input) else: mask = torch.ones(B,T,1).cuda() #if not self.training: # mask = torch.ones(B,T,1).cuda() #else: # print("error bn pad mask!") # print(self.id) # print(pad_mask) # exit() input = input*mask # Compute the sum and square-sum. sum_size = mask.sum() #print(sum_size,input.size(0)*input.size(1),sum_size/input.size(0)/input.size(1));exit() #4032 input_sum = input.sum(dim=(0,1),keepdim=True) input_ssum = (input**2).sum(dim=(0,1),keepdim=True) # # Compute the statistics if self.training: self.shape = [B,T,C] self.L_sum = input.sum(dim=1, keepdim=True) self.L_ssum = (input**2).sum(dim=1, keepdim=True) self.L_sum_size = mask.sum(dim=1, keepdim=True) self.L_mean = self.L_sum/self.L_sum_size self.L_m2 = self.L_ssum/self.L_sum_size self.L_var = self.L_m2-self.L_mean**2 self.exp_var = self.L_var.mean(dim=(0,1)) self.var_exp = self.L_mean.var(dim=(0,1),unbiased=False) self.batchnum += 1 self.mask = mask self.sum_size = sum_size #相当于求统计量的batch size mean = input_sum/sum_size bias_var = input_ssum/sum_size-mean**2 #if self.learn_alpha: # mean = self.alpha*mean # bias_var = bias_var+((self.alpha-1)*mean)**2 #用于计算running statistics self.batch_mean = mean.squeeze() self.batch_var = bias_var.squeeze() with torch.no_grad(): stat_mean = mean.squeeze() stat_var = bias_var.squeeze() self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * stat_mean.data self.running_var = (1 - self.momentum) * self.running_var + self.momentum * stat_var.data #self.momentum_mean = (1 - self.momentum) * self.momentum_mean + self.momentum * (stat_mean.data-self.running_mean) #self.momentum_var = (1 - self.momentum) * self.momentum_var + self.momentum * (stat_var.data-self.running_var) #self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * self.momentum_mean #self.running_var = (1 - self.momentum) * self.running_var + self.momentum * self.momentum_var self.record_forward() else: mean, bias_var = self.running_mean, self.running_var if self.normalize: input = (input - mean)/torch.sqrt(bias_var.clamp(self.eps)) #x=(input-mean)/sigma if self.training: #input.register_hook(self.backward_hook) pass if self.training: self.x = input.data*mask if self.affine: output = input*self.weight + self.bias else: output = input output = output.permute(1, 0, 2).contiguous() #B,T,C-->T,B,C return output
16,604
41.686375
141
py
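Usage sketch for MaskBatchNorm3d (illustrative shapes; import path from the record). A boolean pad_mask of shape B x T marks padded positions, which are excluded from the batch statistics; without a pad_mask the module builds an all-ones mask on the GPU, so the mask is passed explicitly here to keep the example CPU-only.

import torch
from fairseq.modules.norm.mask_batchnorm3d import MaskBatchNorm3d

bn = MaskBatchNorm3d(num_features=8)
bn.train()
x = torch.randn(5, 3, 8)                        # T x B x C
pad_mask = torch.zeros(3, 5, dtype=torch.bool)  # B x T, True marks padding
pad_mask[:, 4] = True                           # pretend the last position is padding
y = bn(x, pad_mask=pad_mask)                    # padded positions do not enter the statistics
print(y.shape)                                  # torch.Size([5, 3, 8])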
RegularizedBN
RegularizedBN-main/fairseq/modules/dynamicconv_layer/setup.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from setuptools import setup
from torch.utils.cpp_extension import CUDAExtension, BuildExtension

setup(
    name='dynamicconv_layer',
    ext_modules=[
        CUDAExtension(
            name='dynamicconv_cuda',
            sources=[
                'dynamicconv_cuda.cpp',
                'dynamicconv_cuda_kernel.cu',
            ],
        ),
    ],
    cmdclass={
        'build_ext': BuildExtension
    })
613
24.583333
67
py
RegularizedBN
RegularizedBN-main/fairseq/modules/dynamicconv_layer/dynamicconv_layer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from torch import nn from torch.autograd import Function import torch.nn.functional as F import dynamicconv_cuda from fairseq import utils from fairseq.modules.unfold import unfold1d from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.modules.fairseq_dropout import FairseqDropout class dynamicconvFunction(Function): @staticmethod def forward(ctx, x, weights, padding_l): ctx.padding_l = padding_l outputs = dynamicconv_cuda.forward(x, weights, padding_l) variables = [x, weights] ctx.save_for_backward(*variables) return outputs[0] @staticmethod def backward(ctx, grad_output): outputs = dynamicconv_cuda.backward( grad_output.contiguous(), ctx.padding_l, *ctx.saved_tensors) grad_input, grad_weights = outputs return grad_input, grad_weights, None @with_incremental_state class DynamicconvLayer(nn.Module): def __init__( self, input_size, kernel_size=1, padding_l=None, weight_softmax=False, num_heads=1, weight_dropout=0., bias=False, renorm_padding=False, conv_bias=False, query_size=None, ): super(DynamicconvLayer, self).__init__() self.input_size = input_size self.query_size = input_size if query_size is None else query_size self.kernel_size = kernel_size self.padding_l = padding_l self.num_heads = num_heads self.weight_softmax = weight_softmax self.weight_dropout_module = FairseqDropout(weight_dropout, module_name=self.__class__.__name__) self.renorm_padding = renorm_padding self.bias = bias self.weight_linear = nn.Linear(input_size, num_heads * kernel_size, bias) if conv_bias: self.conv_bias = nn.Parameter(torch.Tensor(input_size)) else: self.conv_bias = None self.reset_parameters() def reset_parameters(self): nn.init.xavier_uniform_(self.weight_linear.weight) if self.conv_bias is not None: nn.init.constant_(self.conv_bias, 0.) nn.init.constant_(self.weight_linaer.bias, 0.) 
def forward(self, x, incremental_state=None, query=None, unfold=None): T, B, C = x.size() K, H = self.kernel_size, self.num_heads # R = C // H # during inference time, incremental BMM is faster if incremental_state is not None: unfold = x.size(0) > 512 if unfold is None else unfold # use unfold mode as default for long sequence to save memory unfold = unfold or (incremental_state is not None) assert query is None if query is None: query = x if unfold: output = self._forward_unfolded(x, incremental_state, query) else: output = self._forward_expanded(x, incremental_state, query) if self.conv_bias is not None: output = output + self.conv_bias.view(1, 1, -1) return output # during training time, use CUDA kernel else: weight = self.weight_linear(x).view(T, B, H, K) if self.weight_softmax: weight = F.softmax(weight, dim=-1) if self.weight_dropout_module.p: weight = self.weight_dropout_module(weight) weight = weight.permute(1, 2, 3, 0).contiguous() self.filters = weight x = x.permute(1, 2, 0).contiguous() output = dynamicconvFunction.apply(x, weight, self.padding_l).permute(2, 0, 1) if self.conv_bias is not None: output = output + self.conv_bias.view(1, 1, -1) return output def reorder_incremental_state(self, incremental_state, new_order): input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: input_buffer = input_buffer.index_select(1, new_order) self._set_input_buffer(incremental_state, input_buffer) def _get_input_buffer(self, incremental_state): return utils.get_incremental_state(self, incremental_state, 'input_buffer') def _set_input_buffer(self, incremental_state, new_buffer): return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer) def _forward_unfolded(self, x, incremental_state, query): '''The conventional implementation of convolutions. Unfolding the input by having a window shifting to the right.''' T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H assert R * H == C == self.input_size weight = self.weight_linear(query).view(T*B*H, -1) # renorm_padding is only implemented in _forward_expanded assert not self.renorm_padding or incremental_state is not None if incremental_state is not None: input_buffer = self._get_input_buffer(incremental_state) if input_buffer is None: input_buffer = x.new() x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3) if self.kernel_size > 1: self._set_input_buffer(incremental_state, x_unfold[:, :, :, -self.kernel_size+1:]) x_unfold = x_unfold.view(T*B*H, R, -1) else: padding_l = self.padding_l if K > T and padding_l == K-1: weight = weight.narrow(1, K-T, T) K, padding_l = T, T-1 # unfold the input: T x B x C --> T' x B x C x K x_unfold = unfold1d(x, K, padding_l, 0) x_unfold = x_unfold.view(T*B*H, R, K) if self.weight_softmax and not self.renorm_padding: weight = F.softmax(weight, dim=1) weight = weight.narrow(1, 0, K) if incremental_state is not None: weight = weight[:, -x_unfold.size(2):] K = weight.size(1) if self.weight_softmax and self.renorm_padding: weight = F.softmax(weight, dim=1) weight = self.weight_dropout_module(weight, inplace=False) output = torch.bmm(x_unfold, weight.unsqueeze(2)) # T*B*H x R x 1 output = output.view(T, B, C) return output def _forward_expanded(self, x, incremental_stat, query): '''Turn the convolution filters into band matrices and do matrix multiplication. This is faster when the sequence is short, but less memory efficient. This is not used in the decoder during inference. 
''' T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H assert R * H == C == self.input_size weight = self.weight_linear(query).view(T*B*H, -1) if not self.renorm_padding: if self.weight_softmax: weight = F.softmax(weight, dim=1) weight = self.weight_dropout_module(weight, inplace=False) weight = weight.narrow(1, 0, K).contiguous() weight = weight.view(T, B*H, K).transpose(0, 1) x = x.view(T, B*H, R).transpose(0, 1) if self.weight_softmax and self.renorm_padding: # turn the convolution filters into band matrices weight_expanded = weight.new(B*H, T, T+K-1).fill_(float('-inf')) weight_expanded.as_strided((B*H, T, K), (T*(T+K-1), T+K, 1)).copy_(weight) weight_expanded = weight_expanded.narrow(2, self.padding_l, T) # normalize the weight over valid positions like self-attention weight_expanded = F.softmax(weight_expanded, dim=2) weight_expanded = self.weight_dropout_module(weight_expanded, inplace=False) else: P = self.padding_l # For efficieny, we cut the kernel size and reduce the padding when the kernel is larger than the length if K > T and P == K-1: weight = weight.narrow(2, K-T, T) K, P = T, T-1 # turn the convolution filters into band matrices weight_expanded = weight.new_zeros(B*H, T, T+K-1, requires_grad=False) weight_expanded.as_strided((B*H, T, K), (T*(T+K-1), T+K, 1)).copy_(weight) weight_expanded = weight_expanded.narrow(2, P, T) # B*H x T x T output = torch.bmm(weight_expanded, x) output = output.transpose(0, 1).contiguous().view(T, B, C) return output
8,719
39.184332
129
py
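The CUDA path of DynamicconvLayer is only available after building dynamicconv_cuda (see the setup.py record above), so the sketch below is a plain PyTorch reference of what a dynamic convolution computes: every position gets its own softmax-normalized length-K filter per head, applied to a causal window of the input. All names and shapes are illustrative assumptions, and the loop is written for clarity rather than speed.

import torch
import torch.nn.functional as F

T, B, H, R, K = 6, 2, 2, 4, 3                        # length, batch, heads, head dim, kernel size
padding_l = K - 1                                    # causal (left) padding
x = torch.randn(T, B, H * R)
weight = F.softmax(torch.randn(T, B, H, K), dim=-1)  # per-position, per-head filters

x_heads = x.view(T, B, H, R)
x_padded = F.pad(x_heads, (0, 0, 0, 0, 0, 0, padding_l, 0))   # pad the time dimension on the left
out = torch.zeros_like(x_heads)
for t in range(T):
    window = x_padded[t:t + K]                                 # K x B x H x R window ending at t
    out[t] = torch.einsum('bhk,kbhr->bhr', weight[t], window)  # weighted sum over the window
out = out.reshape(T, B, H * R)
print(out.shape)                                               # torch.Size([6, 2, 8])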
RegularizedBN
RegularizedBN-main/fairseq/modules/fc/oni_fc.py
""" Orthogonalization by Newton’s Iteration """ import torch.nn import torch.nn.functional as F from torch.nn import Parameter from torch.autograd import Variable from typing import List from torch.autograd.function import once_differentiable __all__ = ['WN_Conv2d', 'OWN_Conv2d', 'ONI_Conv2d','ONI_ConvTranspose2d', 'ONI_Linear'] # norm funcitons-------------------------------- class IdentityModule(torch.nn.Module): def __init__(self, *args, **kwargs): super(IdentityModule, self).__init__() def forward(self, input: torch.Tensor): return input class WNorm(torch.nn.Module): def forward(self, weight): weight_ = weight.view(weight.size(0), -1) #std = weight_.std(dim=1, keepdim=True) + 1e-5 norm = weight_.norm(dim=1, keepdim=True) + 1e-5 weight_norm = weight_ / norm return weight_norm.view(weight.size()) class OWNNorm(torch.nn.Module): def __init__(self, norm_groups=1, *args, **kwargs): super(OWNNorm, self).__init__() self.norm_groups = norm_groups def matrix_power3(self, Input): B=torch.bmm(Input, Input) return torch.bmm(B, Input) def forward(self, weight: torch.Tensor): assert weight.shape[0] % self.norm_groups == 0 Z = weight.view(self.norm_groups, weight.shape[0] // self.norm_groups, -1) # type: torch.Tensor Zc = Z - Z.mean(dim=-1, keepdim=True) S = torch.matmul(Zc, Zc.transpose(1, 2)) wm = torch.randn(S.shape).to(S) #Scales = torch.eye(S.shape[-1]).to(S).expand(S.shape) #Us = torch.eye(S.shape[-1]).to(S).expand(S.shape) for i in range(self.norm_groups): U, Eig, _ = S[i].svd() Scales = Eig.rsqrt().diag() wm[i] = U.mm(Scales).mm(U.t()) W = wm.matmul(Zc) #print(W.matmul(W.transpose(1,2))) # W = oni_py.apply(weight, self.T, ctx.groups) return W.view_as(weight) def extra_repr(self): fmt_str = ['OWN:'] if self.norm_groups > 1: fmt_str.append('groups={}'.format(self.norm_groups)) return ', '.join(fmt_str) class ONINorm(torch.nn.Module): def __init__(self, T=5, norm_groups=1, *args, **kwargs): super(ONINorm, self).__init__() self.T = T self.norm_groups = norm_groups self.eps = 1e-5 def matrix_power3(self, Input): B=torch.bmm(Input, Input) return torch.bmm(B, Input) def forward(self, weight: torch.Tensor): assert weight.shape[0] % self.norm_groups == 0 Z = weight.view(self.norm_groups, weight.shape[0] // self.norm_groups, -1) # type: torch.Tensor Zc = Z - Z.mean(dim=-1, keepdim=True) S = torch.matmul(Zc, Zc.transpose(1, 2)) eye = torch.eye(S.shape[-1]).to(S).expand(S.shape) S = S + self.eps*eye norm_S = S.norm(p='fro', dim=(1, 2), keepdim=True) S = S.div(norm_S) B = [torch.Tensor([]) for _ in range(self.T + 1)] B[0] = torch.eye(S.shape[-1]).to(S).expand(S.shape) for t in range(self.T): #B[t + 1] = torch.baddbmm(1.5, B[t], -0.5, torch.matrix_power(B[t], 3), S) B[t + 1] = torch.baddbmm(1.5, B[t], -0.5, self.matrix_power3(B[t]), S) W = B[self.T].matmul(Zc).div_(norm_S.sqrt()) #print(W.matmul(W.transpose(1,2))) # W = oni_py.apply(weight, self.T, ctx.groups) return W.view_as(weight) def extra_repr(self): fmt_str = ['T={}'.format(self.T)] if self.norm_groups > 1: fmt_str.append('groups={}'.format(self.norm_groups)) return ', '.join(fmt_str) class ONINorm_colum(torch.nn.Module): def __init__(self, T=5, norm_groups=1, *args, **kwargs): super(ONINorm_colum, self).__init__() self.T = T self.norm_groups = norm_groups self.eps = 1e-5 #print(self.eps) def matrix_power3(self, Input): B=torch.bmm(Input, Input) return torch.bmm(B, Input) def forward(self, weight: torch.Tensor): assert weight.shape[0] % self.norm_groups == 0 Z = weight.view(self.norm_groups, weight.shape[0] // self.norm_groups, -1) # type: 
torch.Tensor Zc = Z - Z.mean(dim=-1, keepdim=True) S = torch.matmul(Zc.transpose(1, 2), Zc) eye = torch.eye(S.shape[-1]).to(S).expand(S.shape) S = S + self.eps*eye norm_S = S.norm(p='fro', dim=(1, 2), keepdim=True) #print(S.size()) #S = S.div(norm_S) B = [torch.Tensor([]) for _ in range(self.T + 1)] B[0] = torch.eye(S.shape[-1]).to(S).expand(S.shape) for t in range(self.T): #B[t + 1] = torch.baddbmm(1.5, B[t], -0.5, torch.matrix_power(B[t], 3), S) B[t + 1] = torch.baddbmm(1.5, B[t], -0.5, self.matrix_power3(B[t]), S) W = Zc.matmul(B[self.T]).div_(norm_S.sqrt()) #print(W.matmul(W.transpose(1,2))) # W = oni_py.apply(weight, self.T, ctx.groups) return W.view_as(weight) def extra_repr(self): fmt_str = ['T={}'.format(self.T)] if self.norm_groups > 1: fmt_str.append('groups={}'.format(self.norm_groups)) return ', '.join(fmt_str) # normedConvs-------------------------------- class WN_Conv2d(torch.nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, NScale=1.414, adjustScale=False, *args, **kwargs): super(WN_Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) print('WN_Conv:---NScale:', NScale, '---adjust:', adjustScale) self.weight_normalization = WNorm() self.scale_ = torch.ones(out_channels, 1, 1, 1).fill_(NScale) if adjustScale: self.WNScale = Parameter(self.scale_) else: # self.scale = Variable(self.scale, requires_grad=False) self.register_buffer('WNScale', self.scale_) def forward(self, input_f: torch.Tensor) -> torch.Tensor: weight_q = self.weight_normalization(self.weight) weight_q = weight_q * self.WNScale out = F.conv2d(input_f, weight_q, self.bias, self.stride, self.padding, self.dilation, self.groups) return out class OWN_Conv2d(torch.nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, norm_groups=1, norm_channels=0, NScale=1.414, adjustScale=False, *args, **kwargs): super(OWN_Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) if norm_channels > 0: norm_groups = out_channels // norm_channels print('OWN_Conv:----norm_groups:', norm_groups, '---NScale:', NScale, '---adjust:', adjustScale) self.weight_normalization = OWNNorm(norm_groups=norm_groups) self.scale_ = torch.ones(out_channels, 1, 1, 1).fill_(NScale) if adjustScale: self.WNScale = Parameter(self.scale_) else: # self.scale = Variable(self.scale, requires_grad=False) self.register_buffer('WNScale', self.scale_) def forward(self, input_f: torch.Tensor) -> torch.Tensor: weight_q = self.weight_normalization(self.weight) weight_q = weight_q * self.WNScale out = F.conv2d(input_f, weight_q, self.bias, self.stride, self.padding, self.dilation, self.groups) return out class ONI_Conv2d(torch.nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, T=5, norm_groups=1, norm_channels=0, NScale=1.414, adjustScale=False, ONIRow_Fix=False, *args, **kwargs): super(ONI_Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) print('ONI channels:--OD:',out_channels, '--ID:', in_channels, '--KS',kernel_size) if out_channels <= (in_channels*kernel_size*kernel_size): if norm_channels > 0: norm_groups = out_channels // norm_channels #print('ONI_Conv_Row:--T=', T, '----norm_groups:', norm_groups, '---NScale:', NScale, '---adjust:', adjustScale) self.weight_normalization = ONINorm(T=T, 
norm_groups=norm_groups) else: if ONIRow_Fix: # print('ONI_Conv_Row:--T=', T, '----norm_groups:', norm_groups, '---NScale:', NScale, '---adjust:', adjustScale) self.weight_normalization = ONINorm(T=T, norm_groups=norm_groups) else: # print('ONI_Conv_Colum:--T=', T, '----norm_groups:', norm_groups, '---NScale:', NScale, '---adjust:', adjustScale) self.weight_normalization = ONINorm_colum(T=T, norm_groups=norm_groups) self.scale_ = torch.ones(out_channels, 1, 1, 1).fill_(NScale) if adjustScale: self.WNScale = Parameter(self.scale_) else: # self.scale = Variable(self.scale, requires_grad=False) self.register_buffer('WNScale', self.scale_) def forward(self, input_f: torch.Tensor) -> torch.Tensor: weight_q = self.weight_normalization(self.weight) weight_q = weight_q * self.WNScale out = F.conv2d(input_f, weight_q, self.bias, self.stride, self.padding, self.dilation, self.groups) return out class ONI_Linear(torch.nn.Linear): def __init__(self, in_channels, out_channels, bias=True, T=4, norm_groups=1, norm_channels=0, NScale=1, adjustScale=False, *args, **kwargs): super(ONI_Linear, self).__init__(in_channels, out_channels, bias) if out_channels <= in_channels: if norm_channels > 0: norm_groups = out_channels // norm_channels print('ONI_Linear_Row:--T=', T, '----norm_groups:', norm_groups, '---NScale:', NScale, '---adjust:', adjustScale) self.weight_normalization = ONINorm(T=T, norm_groups=norm_groups) else: print('ONI_Linear_Colum:--T=', T, '----norm_groups:', norm_groups, '---NScale:', NScale, '---adjust:', adjustScale) self.weight_normalization = ONINorm_colum(T=T, norm_groups=norm_groups) self.scale_ = torch.ones(out_channels, 1, ).fill_(NScale) if adjustScale: self.WNScale = Parameter(self.scale_) else: # self.scale = Variable(self.scale, requires_grad=False) self.register_buffer('WNScale', self.scale_) def forward(self, input_f: torch.Tensor) -> torch.Tensor: weight_q = self.weight_normalization(self.weight) weight_q = weight_q * self.WNScale out = F.linear(input_f, weight_q, self.bias) return out #Trans Conv class ONI_ConvTranspose2d(torch.nn.ConvTranspose2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, dilation=1, groups=1, bias=True, T=5, norm_groups=1, NScale=1.414, adjustScale=False): super(ONI_ConvTranspose2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, output_padding, groups, bias, dilation) print('ONI_Column:--T=', T, '----norm_groups:', norm_groups, '---NScale:', NScale, '---adjust:', adjustScale) self.weight_normalization = ONINorm(T=T, norm_groups=norm_groups) self.scale_ = torch.ones(out_channels, 1, 1, 1).fill_(NScale) if adjustScale: self.scale = Parameter(self.scale_) else: # self.scale = Variable(self.scale, requires_grad=False) self.register_buffer('scale', self.scale_) def forward(self, input_f: torch.Tensor, output_size=None) -> torch.Tensor: output_padding = self._output_padding(input_f, output_size, self.stride, self.padding, self.kernel_size) weight_q = self.weight_normalization(self.weight) weight_q = weight_q * self.scale out = F.conv_transpose2d(input_f, weight_q, self.bias, self.stride, self.padding, output_padding, self.groups, self.dilation) return out if __name__ == '__main__': SEED = 0 torch.manual_seed(SEED) oni_ = ONINorm(T=5, norm_groups=1) w_ = torch.randn(4, 4, 2, 2) print(w_) w_.requires_grad_() y_ = oni_(w_) z_ = y_.view(w_.size(0), -1) #print(z_.sum(dim=1)) print(z_.matmul(z_.t())) # y_.sum().backward() # print('w grad', w_.grad.size()) # conv=ONI_Conv2d(4, 2, 1, 
adjustScale=True) # b = conv(w_) # print(b)
12,549
41.398649
146
py
RegularizedBN
RegularizedBN-main/fairseq/modules/fc/dropout_fc.py
import torch
import torch.nn as nn
import torch.nn.functional as F

__all__ = ['DropoutFC']


class DropoutFC(nn.Linear):
    def __init__(self, in_features, out_features, bias=True, dropout=0, scale=1.0):
        super(DropoutFC, self).__init__(in_features, out_features, bias)
        print('DropoutFC dropout:{}, scale:{}'.format(dropout, scale))
        self.weight_dropout_module = nn.Dropout(p=dropout)
        nn.init.xavier_uniform_(self.weight, gain=scale)
        nn.init.constant_(self.bias, 0.0)

    def forward(self, input_f: torch.Tensor) -> torch.Tensor:
        weight_q = self.weight_dropout_module(self.weight)
        out = F.linear(input_f, weight_q, self.bias)
        return out


if __name__ == '__main__':
    m = DropoutFC(2, 4)
    w_ = torch.randn(5, 2)
    w_.requires_grad_()
    y_ = m(w_)
    #z_ = y_.view(w_.size(0), -1)
    #print(z_.norm(dim=1))
    y_.sum().backward()
    print('w grad', w_.grad.size())
940
28.40625
83
py
RegularizedBN
RegularizedBN-main/fairseq/modules/fc/conv.py
import torch
from torch import nn
import torch.nn.functional as F


class Conv1d(nn.Conv1d):
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1):
        self.padding = (kernel_size - 1) // 2
        self.stride = stride
        super(Conv1d, self).__init__(in_channels, out_channels, kernel_size,
                                     stride=stride, padding=self.padding)

    def extra_repr(self):
        return 'in_channels={in_channels}, out_channels={out_channels}, kernel_size={kernel_size}'.format(**self.__dict__)

    def forward(self, x):
        # x: T, B, C
        x = x.permute(1, 2, 0)  # T,B,C --> B,C,T
        x = F.conv1d(x, self.weight, bias=self.bias, stride=self.stride, padding=self.padding)
        x = x.permute(2, 0, 1)  # B,C,T --> T,B,C
        return x
674
38.705882
112
py
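Usage sketch for the Conv1d wrapper above (illustrative shapes; import path from the record): it accepts a T x B x C tensor, convolves along T with same-length padding, and returns T x B x C.

import torch
from fairseq.modules.fc.conv import Conv1d

conv = Conv1d(in_channels=8, out_channels=8, kernel_size=3)
x = torch.randn(5, 3, 8)   # T x B x C
y = conv(x)
print(y.shape)             # torch.Size([5, 3, 8])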
RegularizedBN
RegularizedBN-main/fairseq/modules/fc/wn.py
import torch.nn
import torch.nn.functional as F
from torch.nn import Parameter
from torch.autograd import Variable
from typing import List
from torch.autograd.function import once_differentiable

__all__ = ['CWN']


# norm functions--------------------------------
class CWNorm(torch.nn.Module):
    def forward(self, weight):
        weight_ = weight.view(weight.size(0), -1)
        weight_mean = weight_.mean(dim=1, keepdim=True)
        weight_ = weight_ - weight_mean
        norm = weight_.norm(dim=1, keepdim=True) + 1e-5
        weight_CWN = weight_ / norm
        return weight_CWN.view(weight.size())


# the default scale is 1.414, probably because of ReLU
class CWN(torch.nn.Linear):
    def __init__(self, in_features, out_features, iscenter=True, bias=True,
                 NScale=1, adjustScale=False, *args, **kwargs):
        super(CWN, self).__init__(in_features, out_features, bias)
        print('CWN:---NScale:', NScale, '---adjust:', adjustScale)
        self.weight_normalization = CWNorm()
        if NScale < 0:
            self.scale_ = torch.norm(self.weight.data, dim=1, keepdim=True)
        else:
            self.scale_ = torch.ones(out_features, 1).fill_(NScale)
        if adjustScale:
            self.WNScale = Parameter(self.scale_)
        else:
            # self.scale = Variable(self.scale, requires_grad=False)
            self.register_buffer('WNScale', self.scale_)

    def forward(self, input_f: torch.Tensor) -> torch.Tensor:
        weight_q = self.weight_normalization(self.weight)
        weight_q = weight_q * self.WNScale
        out = F.linear(input_f, weight_q, self.bias)
        return out


class MultiHeadCWN(torch.nn.Linear):
    def __init__(self, in_features, out_features, bias=True, num_heads=8,
                 NScale=1, adjustScale=False, *args, **kwargs):
        super(MultiHeadCWN, self).__init__(in_features, out_features, bias)
        print('MultiHeadCWN:---NScale:', NScale, '---adjust:', adjustScale)
        self.in_features = in_features
        self.out_features = out_features
        self.num_heads = num_heads
        self.weight_normalization = CWNorm()
        self.scale_ = torch.ones(out_features, 1).fill_(NScale)
        if adjustScale:
            self.WNScale = Parameter(self.scale_)
        else:
            # self.scale = Variable(self.scale, requires_grad=False)
            self.register_buffer('WNScale', self.scale_)

    def forward(self, input_f: torch.Tensor) -> torch.Tensor:
        #self.weight = self.weight.reshape(self.in_features*self.num_heads, self.out_features//self.num_heads)
        #self.weight = self.weight.view(self.in_features*self.num_heads, self.out_features//self.num_heads)
        weight_q = self.weight_normalization(self.weight)
        #weight_q = weight_q.reshape(self.in_features, self.out_features)
        weight_q = weight_q * self.WNScale
        out = F.linear(input_f, weight_q, self.bias)
        return out


if __name__ == '__main__':
    cwn_ = CWNorm()
    print(cwn_)
    w_ = torch.randn(4, 3, 2)
    w_.requires_grad_()
    y_ = cwn_(w_)
    z_ = y_.view(w_.size(0), -1)
    print(z_.norm(dim=1))
    y_.sum().backward()
    print('w grad', w_.grad.size())
3,164
38.5625
110
py
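Usage sketch for CWN (centered weight normalization) above, with illustrative sizes: after normalization each row of the effective weight is zero-mean with unit norm, then multiplied by the fixed NScale buffer.

import torch
from fairseq.modules.fc.wn import CWN

fc = CWN(16, 8, NScale=1)
x = torch.randn(4, 16)
y = fc(x)
w = fc.weight_normalization(fc.weight).view(8, -1)
print(y.shape, w.mean(dim=1).abs().max(), w.norm(dim=1))  # rows are centered and unit-norm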
RegularizedBN
RegularizedBN-main/fairseq/modules/quantization/pq/em.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import random import logging from collections import Counter import torch class EM: """ EM algorithm used to quantize the columns of W to minimize ||W - W_hat||^2 Args: - W: weight matrix of size (in_features x out_features) - n_iter: number of k-means iterations - n_centroids: number of centroids (size of codebook) - eps: for cluster reassignment when an empty cluster is found - max_tentatives for cluster reassignment when an empty cluster is found - verbose: print error after each iteration Remarks: - If one cluster is empty, the most populated cluster is split into two clusters - All the relevant dimensions are specified in the code """ def __init__( self, W, n_centroids=256, n_iter=20, eps=1e-6, max_tentatives=30, verbose=True ): self.W = W self.n_centroids = n_centroids self.n_iter = n_iter self.eps = eps self.max_tentatives = max_tentatives self.verbose = verbose self.centroids = torch.Tensor() self.assignments = torch.Tensor() self.objective = [] def initialize_centroids(self): """ Initializes the centroids by sampling random columns from W. """ in_features, out_features = self.W.size() indices = torch.randint( low=0, high=out_features, size=(self.n_centroids,) ).long() self.centroids = self.W[:, indices].t() # (n_centroids x in_features) def step(self, i): """ There are two standard steps for each iteration: expectation (E) and minimization (M). The E-step (assignment) is performed with an exhaustive search and the M-step (centroid computation) is performed with the exact solution. Args: - i: step number Remarks: - The E-step heavily uses PyTorch broadcasting to speed up computations and reduce the memory overhead """ # assignments (E-step) distances = self.compute_distances() # (n_centroids x out_features) self.assignments = torch.argmin(distances, dim=0) # (out_features) n_empty_clusters = self.resolve_empty_clusters() # centroids (M-step) for k in range(self.n_centroids): W_k = self.W[:, self.assignments == k] # (in_features x size_of_cluster_k) self.centroids[k] = W_k.mean(dim=1) # (in_features) # book-keeping obj = (self.centroids[self.assignments].t() - self.W).norm(p=2).item() self.objective.append(obj) if self.verbose: logging.info( f"Iteration: {i},\t" f"objective: {obj:.6f},\t" f"resolved empty clusters: {n_empty_clusters}" ) def resolve_empty_clusters(self): """ If one cluster is empty, the most populated cluster is split into two clusters by shifting the respective centroids. This is done iteratively for a fixed number of tentatives. 
""" # empty clusters counts = Counter(map(lambda x: x.item(), self.assignments)) empty_clusters = set(range(self.n_centroids)) - set(counts.keys()) n_empty_clusters = len(empty_clusters) tentatives = 0 while len(empty_clusters) > 0: # given an empty cluster, find most populated cluster and split it into two k = random.choice(list(empty_clusters)) m = counts.most_common(1)[0][0] e = torch.randn_like(self.centroids[m]) * self.eps self.centroids[k] = self.centroids[m].clone() self.centroids[k] += e self.centroids[m] -= e # recompute assignments distances = self.compute_distances() # (n_centroids x out_features) self.assignments = torch.argmin(distances, dim=0) # (out_features) # check for empty clusters counts = Counter(map(lambda x: x.item(), self.assignments)) empty_clusters = set(range(self.n_centroids)) - set(counts.keys()) # increment tentatives if tentatives == self.max_tentatives: logging.info( f"Could not resolve all empty clusters, {len(empty_clusters)} remaining" ) raise EmptyClusterResolveError tentatives += 1 return n_empty_clusters def compute_distances(self): """ For every centroid m, computes ||M - m[None, :]||_2 Remarks: - We rely on PyTorch's broadcasting to speed up computations and reduce the memory overhead - Without chunking, the sizes in the broadcasting are modified as: (n_centroids x n_samples x out_features) -> (n_centroids x out_features) - The broadcasting computation is automatically chunked so that the tensors fit into the memory of the GPU """ nb_centroids_chunks = 1 while True: try: return torch.cat( [ (self.W[None, :, :] - centroids_c[:, :, None]).norm(p=2, dim=1) for centroids_c in self.centroids.chunk( nb_centroids_chunks, dim=0 ) ], dim=0, ) except RuntimeError: nb_centroids_chunks *= 2 def assign(self): """ Assigns each column of W to its closest centroid, thus essentially performing the E-step in train(). Remarks: - The function must be called after train() or after loading centroids using self.load(), otherwise it will return empty tensors """ distances = self.compute_distances() # (n_centroids x out_features) self.assignments = torch.argmin(distances, dim=0) # (out_features) def save(self, path, layer): """ Saves centroids and assignments. Args: - path: folder used to save centroids and assignments """ torch.save(self.centroids, os.path.join(path, "{}_centroids.pth".format(layer))) torch.save( self.assignments, os.path.join(path, "{}_assignments.pth".format(layer)) ) torch.save(self.objective, os.path.join(path, "{}_objective.pth".format(layer))) def load(self, path, layer): """ Loads centroids and assignments from a given path Args: - path: folder use to load centroids and assignments """ self.centroids = torch.load( os.path.join(path, "{}_centroids.pth".format(layer)) ) self.assignments = torch.load( os.path.join(path, "{}_assignments.pth".format(layer)) ) self.objective = torch.load( os.path.join(path, "{}_objective.pth".format(layer)) ) class EmptyClusterResolveError(Exception): pass
7,333
33.59434
92
py
RegularizedBN
RegularizedBN-main/fairseq/modules/quantization/pq/utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import re from operator import attrgetter, itemgetter import numpy as np import torch.nn as nn import torch.distributed as dist from .modules import PQConv2d, PQLinear, PQEmbedding from .pq import PQ def quantize_model_( model, size_tracker, layers_to_quantize, block_sizes_config, n_centroids_config, step=0, n_iter=15, eps=1e-6, max_tentatives=100, verbose=True, ): """ Quantize a model in-place by stages. All the targeted layers are replaced by their quantized counterpart, and the model is ready for the finetuning of the centroids in a standard training loop (no modifications required). Note that we do not quantize biases. Args: - model: a nn.Module - size_tracker: useful for tracking quatization statistics - layers_to_quantize: a list containing regexps for filtering the layers to quantize at each stage according to their name (as in model.named_parameters()) - block_sizes_config: dict like { 'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}), 'Linear': ('in_features', {'*': 8}) } For instance, all conv2d layers with kernel size 3x3 have a block size of 9 and all Linear layers are quantized with a block size of 8, irrespective of their size. - n_centroids_config: dict like { 'Conv2d': ('kernel_size', {'*': 256}), 'Linear': ('in_features', {'*': 256}) } For instance, all conv2d layers are quantized with 256 centroids - step: the layers to quantize inplace corresponding to layers_to_quantize[step] """ quantized_layers = get_layers(model, layers_to_quantize[step]) for layer in quantized_layers: # book-keeping is_master_process = (not dist.is_initialized()) or (dist.is_initialized() and dist.get_rank() == 0) verbose = verbose and is_master_process # get block size and centroids module = attrgetter(layer)(model) block_size = get_param(module, layer, block_sizes_config) n_centroids = get_param(module, layer, n_centroids_config) if verbose: logging.info(f"Quantizing layer {layer} with block size {block_size} and {n_centroids} centroids") # quantize layer weight = module.weight.data.clone() is_bias = 'bias' in [x[0] for x in module.named_parameters()] bias = module.bias.data.clone() if is_bias else None quantizer = PQ( weight, block_size, n_centroids=n_centroids, n_iter=n_iter, eps=eps, max_tentatives=max_tentatives, verbose=verbose, ) # quantization performed on all GPUs with same seed quantizer.encode() centroids = quantizer.centroids.contiguous() assignments = quantizer.assignments.contiguous() # broadcast results to make sure weights are up-to-date if dist.is_initialized(): dist.broadcast(centroids, 0) dist.broadcast(assignments, 0) # instantiate the quantized counterpart if isinstance(module, nn.Linear): out_features, in_features = map( lambda k: module.__dict__[k], ["out_features", "in_features"] ) quantized_module = PQLinear( centroids, assignments, bias, in_features, out_features ) elif isinstance(module, nn.Embedding): num_embeddings, embedding_dim = map( lambda k: module.__dict__[k], ["num_embeddings", "embedding_dim"] ) quantized_module = PQEmbedding( centroids, assignments, num_embeddings, embedding_dim ) elif isinstance(module, nn.Conv2d): out_channels, in_channels, kernel_size = map( lambda k: module.__dict__[k], ["out_channels", "in_channels", "kernel_size"], ) stride, padding, dilation, groups, padding_mode = map( lambda k: module.__dict__[k], ["stride", "padding", "dilation", "groups", 
"padding_mode"], ) quantized_module = PQConv2d( centroids, assignments, bias, in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, padding_mode=padding_mode, ) else: raise ValueError(f"Module {module} not yet supported for quantization") # replace layer by its quantized counterpart attrsetter(layer)(model, quantized_module) # update statistics size_tracker.update(weight, block_size, n_centroids) # return name of quantized layers return quantized_layers def get_layers(model, filter_regexp): """ Filters out the layers according to a regexp. Note that we omit biases. Args: - model: a nn.Module - filter_regexp: a regexp to filter the layers to keep according to their name in model.named_parameters(). For instance, the regexp: down_layers\\.[123456]\\.(conv[12]|identity\\.conv)) is keeping blocks down_layers from 1 to 6, and inside each block is keeping conv1, conv2 and identity.conv. Remarks: - We add (module\\.)? at the beginning of the regexp to account for the possible use of nn.parallel.DataParallel """ # get all parameter names all_layers = map(itemgetter(0), model.named_parameters()) # remove biases all_layers = filter(lambda x: "bias" not in x, all_layers) # remove .weight in all other names (or .weight_orig is spectral norm) all_layers = map(lambda x: x.replace(".weight_orig", ""), all_layers) all_layers = map(lambda x: x.replace(".weight", ""), all_layers) # return filtered layers filter_regexp = "(module\\.)?" + "(" + filter_regexp + ")" r = re.compile(filter_regexp) return list(filter(r.match, all_layers)) def get_param(module, layer_name, param_config): """ Given a quantization configuration, get the right parameter for the module to be quantized. Args: - module: a nn.Module - layer_name: the name of the layer - param_config: a dict like { 'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}), 'Linear': ('in_features', {'*': 8}) } For instance, all conv2d layers with kernel size 3x3 have a block size of 9 and all Linear layers are quantized with a block size of 8, irrespective of their size. Remarks: - if 'fuzzy_name' is passed as a parameter, layers whose layer_name include 'fuzzy_name' will be assigned the given parameter. In the following example, conv.expand layers will have a block size of 9 while conv.reduce will have a block size of 4 and all other layers will have a block size of 2. { 'Conv2d': ('fuzzy_name', {'expand': 9, 'reduce': 4, '*': 2}), 'Linear': ('fuzzy_name', {'classifier': 8, 'projection': 4}) } """ layer_type = module.__class__.__name__ if layer_type not in param_config: raise KeyError(f"Layer type {layer_type} not in config for layer {module}") feature, params = param_config[module.__class__.__name__] if feature != "fuzzy_name": feature_value = str(getattr(module, feature)) if feature_value not in params: if "*" in params: feature_value = "*" else: raise KeyError( f"{feature}={feature_value} not in config for layer {module}" ) else: feature_values = [name for name in params if name in layer_name] if len(feature_values) == 0: if "*" in params: feature_value = "*" else: raise KeyError( f"name={layer_name} not in config for {module}" ) else: feature_value = feature_values[0] return params[feature_value] class SizeTracker(object): """ Class to keep track of the compressed network size with iPQ. 
Args: - model: a nn.Module Remarks: - The compressed size is the sum of three components for each layer in the network: (1) Storing the centroids given by iPQ in fp16 (2) Storing the assignments of the blocks in int8 (3) Storing all non-compressed elements such as biases - This cost in only valid if we use 256 centroids (then indexing can indeed by done with int8). """ def __init__(self, model): self.model = model self.size_non_compressed_model = self.compute_size() self.size_non_quantized = self.size_non_compressed_model self.size_index = 0 self.size_centroids = 0 self.n_quantized_layers = 0 def compute_size(self): """ Computes the size of the model (in MB). """ res = 0 for _, p in self.model.named_parameters(): res += p.numel() return res * 4 / 1024 / 1024 def update(self, W, block_size, n_centroids): """ Updates the running statistics when quantizing a new layer. """ # bits per weights bits_per_weight = np.log2(n_centroids) / block_size self.n_quantized_layers += 1 # size of indexing the subvectors of size block_size (in MB) size_index_layer = bits_per_weight * W.numel() / 8 / 1024 / 1024 self.size_index += size_index_layer # size of the centroids stored in float16 (in MB) size_centroids_layer = n_centroids * block_size * 2 / 1024 / 1024 self.size_centroids += size_centroids_layer # size of non-compressed layers, e.g. LayerNorms or biases (in MB) size_uncompressed_layer = W.numel() * 4 / 1024 / 1024 self.size_non_quantized -= size_uncompressed_layer def __repr__(self): size_compressed = ( self.size_index + self.size_centroids + self.size_non_quantized ) compression_ratio = self.size_non_compressed_model / size_compressed # NOQA return ( f"Non-compressed model size: {self.size_non_compressed_model:.2f} MB. " f"After quantizing {self.n_quantized_layers} layers, size " f"(indexing + centroids + other): {self.size_index:.2f} MB + " f"{self.size_centroids:.2f} MB + {self.size_non_quantized:.2f} MB = " f"{size_compressed:.2f} MB, compression ratio: {compression_ratio:.2f}x" ) def attrsetter(*items): def resolve_attr(obj, attr): attrs = attr.split(".") head = attrs[:-1] tail = attrs[-1] for name in head: obj = getattr(obj, name) return obj, tail def g(obj, val): for attr in items: resolved_obj, resolved_attr = resolve_attr(obj, attr) setattr(resolved_obj, resolved_attr, val) return g
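

if __name__ == "__main__":
    # Minimal end-to-end sketch of quantize_model_ on a toy model. Illustrative
    # only: TinyModel, the layer regexp, block sizes and centroid counts are
    # assumed values, not a fairseq recipe. Because of the relative imports,
    # run this as a module (python -m ...), not as a loose script.
    class TinyModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.fc1 = nn.Linear(16, 16)
            self.fc2 = nn.Linear(16, 8)

        def forward(self, x):
            return self.fc2(self.fc1(x))

    model = TinyModel()
    size_tracker = SizeTracker(model)
    quantized = quantize_model_(
        model,
        size_tracker,
        layers_to_quantize=["fc1|fc2"],
        block_sizes_config={"Linear": ("in_features", {"*": 4})},
        n_centroids_config={"Linear": ("in_features", {"*": 8})},
        step=0,
        n_iter=3,
        verbose=False,
    )
    print("quantized layers:", quantized)
    print(size_tracker)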
11,605
33.541667
110
py
RegularizedBN
RegularizedBN-main/fairseq/modules/quantization/pq/modules/qlinear.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F class PQLinear(nn.Module): """ Quantized counterpart of nn.Linear module. Stores the centroid, the assignments and the non-quantized biases. The full weight is re-instantiated at each forward pass. Args: - centroids: centroids of size n_centroids x block_size - assignments: assignments of the centroids to the subvectors of size self.out_features x n_blocks - bias: the non-quantized bias Remarks: - We refer the reader to the official documentation of the nn.Linear module for the other arguments and the behavior of the module - Performance tests on GPU show that this implementation is 15% slower than the non-quantized nn.Linear module for a standard training loop. """ def __init__(self, centroids, assignments, bias, in_features, out_features): super(PQLinear, self).__init__() self.block_size = centroids.size(1) self.n_centroids = centroids.size(0) self.in_features = in_features self.out_features = out_features # check compatibility if self.in_features % self.block_size != 0: raise ValueError("Wrong PQ sizes") if len(assignments) % self.out_features != 0: raise ValueError("Wrong PQ sizes") # define parameters self.centroids = nn.Parameter(centroids, requires_grad=True) self.register_buffer("assignments", assignments) self.register_buffer("counts", torch.bincount(assignments).type_as(centroids)) if bias is not None: self.bias = nn.Parameter(bias) else: self.register_parameter("bias", None) @property def weight(self): return ( self.centroids[self.assignments] .reshape(-1, self.out_features, self.block_size) .permute(1, 0, 2) .flatten(1, 2) ) def forward(self, x): return F.linear( x, self.weight, self.bias, ) def extra_repr(self): return f"in_features={self.in_features},\ out_features={self.out_features},\ n_centroids={self.n_centroids},\ block_size={self.block_size},\ bias={self.bias is not None}"
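

if __name__ == "__main__":
    # Shape sanity-check sketch, with assumed toy sizes: 8 centroids of block
    # size 4 for a 16 -> 8 linear layer, i.e. 8 * 16 / 4 = 32 assignments.
    centroids = torch.randn(8, 4)
    assignments = torch.randint(0, 8, (32,))
    bias = torch.zeros(8)
    layer = PQLinear(centroids, assignments, bias, in_features=16, out_features=8)
    print(layer.weight.shape)                # torch.Size([8, 16])
    print(layer(torch.randn(2, 16)).shape)   # torch.Size([2, 8])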
2,547
34.388889
86
py
RegularizedBN
RegularizedBN-main/fairseq/modules/quantization/pq/modules/qconv.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.modules.utils import _pair class PQConv2d(nn.Module): """ Quantized counterpart of nn.Conv2d module. Stores the centroid, the assignments and the non-quantized biases. The full weight is re-instantiated at each forward pass and autograd automatically computes the gradients with respect to the centroids. Args: - centroids: centroids of size n_centroids x block_size - assignments: assignments of the centroids to the subvectors of size self.out_channels x n_blocks - bias: the non-quantized bias, must be either torch.Tensor or None Remarks: - We refer the reader to the official documentation of the nn.Conv2d module for the other arguments and the behavior of the module. - Performance tests on GPU show that this implementation is 10% slower than the non-quantized nn.Conv2d module for a standard training loop. - During the backward, the gradients are averaged by cluster and not summed. This explains the hook registered to the centroids. """ def __init__( self, centroids, assignments, bias, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, padding_mode="zeros", ): super(PQConv2d, self).__init__() self.block_size = centroids.size(1) self.n_centroids = centroids.size(0) self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = _pair(kernel_size) self.stride = _pair(stride) self.padding = _pair(padding) self.dilation = _pair(dilation) self.groups = groups self.padding_mode = padding_mode # check compatibility if in_channels // groups * np.prod(self.kernel_size) % self.block_size != 0: raise ValueError("Wrong PQ sizes") if len(assignments) % out_channels != 0: raise ValueError("Wrong PQ sizes") if in_channels % groups != 0: raise ValueError("in_channels must be divisible by groups") if out_channels % groups != 0: raise ValueError("out_channels must be divisible by groups") # define parameters self.centroids = nn.Parameter(centroids, requires_grad=True) self.register_buffer("assignments", assignments) self.register_buffer("counts", torch.bincount(assignments).type_as(centroids)) if bias is not None: self.bias = nn.Parameter(bias) else: self.register_parameter("bias", None) # register hook for averaging gradients per centroids instead of summing self.centroids.register_hook(lambda x: x / self.counts[:, None]) @property def weight(self): return ( self.centroids[self.assignments] .reshape(-1, self.out_channels, self.block_size) .permute(1, 0, 2) .reshape( self.out_channels, self.in_channels // self.groups, *self.kernel_size ) ) def forward(self, x): return F.conv2d( x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups, ) def extra_repr(self): s = "{in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}" if self.padding != (0,) * len(self.padding): s += ", padding={padding}" if self.dilation != (1,) * len(self.dilation): s += ", dilation={dilation}" if self.groups != 1: s += ", groups={groups}" if self.bias is None: s += ", bias=False" if self.padding_mode != "zeros": s += ", padding_mode={padding_mode}" s += ", n_centroids={n_centroids}, block_size={block_size}" return s.format(**self.__dict__)
4,245
35.603448
87
py
RegularizedBN
RegularizedBN-main/fairseq/modules/quantization/pq/modules/qemb.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F class PQEmbedding(nn.Module): """ Quantized counterpart of nn.Embedding module. Stores the centroids and the assignments. The full weight is re-instantiated at each forward pass. Args: - centroids: centroids of size n_centroids x block_size - assignments: assignments of the centroids to the subvectors of size self.out_features x n_blocks - bias: the non-quantized bias Remarks: - We refer the reader to the official documentation of the nn.Embedding module for the other arguments and the behavior of the module - Performance tests on GPU show that this implementation is 10% slower than the non-quantized nn.Embedding module for a standard training loop. """ def __init__(self, centroids, assignments, num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2., scale_grad_by_freq=False, sparse=False, _weight=None): super(PQEmbedding, self).__init__() self.block_size = centroids.size(1) self.n_centroids = centroids.size(0) self.num_embeddings = num_embeddings self.embedding_dim = embedding_dim if padding_idx is not None: if padding_idx > 0: assert padding_idx < self.num_embeddings, 'Padding_idx must be within num_embeddings' elif padding_idx < 0: assert padding_idx >= -self.num_embeddings, 'Padding_idx must be within num_embeddings' padding_idx = self.num_embeddings + padding_idx self.padding_idx = padding_idx self.max_norm = max_norm self.norm_type = norm_type self.scale_grad_by_freq = scale_grad_by_freq self.sparse = sparse # check compatibility if self.embedding_dim % self.block_size != 0: raise ValueError("Wrong PQ sizes") if len(assignments) % self.num_embeddings != 0: raise ValueError("Wrong PQ sizes") # define parameters self.centroids = nn.Parameter(centroids, requires_grad=True) self.register_buffer("assignments", assignments) self.register_buffer("counts", torch.bincount(assignments).type_as(centroids)) @property def weight(self): return ( self.centroids[self.assignments] .reshape(-1, self.num_embeddings, self.block_size) .permute(1, 0, 2) .flatten(1, 2) ) def forward(self, input): return F.embedding( input, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse) def extra_repr(self): s = '{num_embeddings}, {embedding_dim}' if self.padding_idx is not None: s += ', padding_idx={padding_idx}' if self.max_norm is not None: s += ', max_norm={max_norm}' if self.norm_type != 2: s += ', norm_type={norm_type}' if self.scale_grad_by_freq is not False: s += ', scale_grad_by_freq={scale_grad_by_freq}' if self.sparse is not False: s += ', sparse=True' s += ', n_centroids={n_centroids}, block_size={block_size}' return s.format(**self.__dict__)
3,515
38.954545
103
py
RegularizedBN
RegularizedBN-main/fairseq/modules/quantization/scalar/utils.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
from operator import attrgetter

import torch.nn as nn
import torch.distributed as dist

from ..pq.utils import get_layers, attrsetter
from .modules import IntConv2d, IntLinear, IntEmbedding, ActivationQuantizer


MAPPING = {nn.Linear: IntLinear, nn.Embedding: IntEmbedding, nn.Conv2d: IntConv2d}


def quantize_model_(model, p=0.2, bits=8, update_step=3000):
    """
    Replaces all modules with their scalar quantized counterpart and
    registers hooks to quantize the post-activations of those modules.

    Args:
        - model: a nn.Module
        - p: amount of noise (0 for no noise, 1 to quantize all the weights/activations)
        - bits: number of bits
        - update_step: update quantization parameters every update_step steps
    """

    # quantize all layers
    quantized_layers = get_layers(model, "(.*?)")

    for layer in quantized_layers:

        # book-keeping
        is_master_process = (not dist.is_initialized()) or (dist.is_initialized() and dist.get_rank() == 0)

        # recover module
        module = attrgetter(layer)(model)
        if is_master_process:
            logging.info(f"Quantizing layer {layer} with bits={bits} and QuantNoise={p}")

        # quantization params
        q_params = {"p": p, "update_step": update_step, "bits": bits, "method": "histogram", "counter": 0}

        # instantiate the quantized counterpart
        if isinstance(module, tuple(MAPPING.keys())):
            QuantizedModule = MAPPING[module.__class__]
            quantized_module = QuantizedModule.__new__(QuantizedModule)
            params = module.__dict__
            params.update(q_params)
            quantized_module.__dict__.update(params)

        else:
            if is_master_process:
                logging.info(f"Module {module} not yet supported for quantization")
            continue

        # activation quantization
        a_q = ActivationQuantizer(quantized_module, p=0, bits=bits, method="histogram")

        # replace layer by its quantized counterpart
        attrsetter(layer)(model, quantized_module)

    # return name of quantized layers
    return quantized_layers
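

if __name__ == "__main__":
    # Minimal sketch on a toy model (assumed values, not a fairseq recipe).
    # Forward passes through the quantized modules expect a CUDA device,
    # because the int8 observers in ops.py move scale/zero_point to the GPU.
    model = nn.Sequential(nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 8))
    layers = quantize_model_(model, p=0.5, bits=8, update_step=1000)
    print("quantized layers:", layers)
    print(model)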
2,323
33.176471
107
py
RegularizedBN
RegularizedBN-main/fairseq/modules/quantization/scalar/ops.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch def emulate_int(w, bits, method, scale=None, zero_point=None): q = globals()[f"emulate_int{bits}_{method}"] return q(w, scale=scale, zero_point=zero_point) def quantize(w, scale, zero_point): return (torch.clamp(torch.round(w / scale + zero_point), 0, 255) - zero_point) * scale def emulate_int8_histogram(w, scale=None, zero_point=None): if scale is None: obs = torch.quantization.observer.HistogramObserver() _ = obs(w.float()) scale, zero_point = obs.calculate_qparams() scale = scale.cuda().type_as(w) zero_point = zero_point.cuda().type_as(w) return quantize(w, scale, zero_point), scale, zero_point def emulate_int8_channel(w, scale=None, zero_point=None): if scale is None: obs = torch.quantization.observer.PerChannelMinMaxObserver( ch_axis=-1, qscheme=torch.per_channel_symmetric ) _ = obs(w) scale, zero_point, ch_axis = obs.get_qparams() scale = scale.cuda().type_as(w) zero_point = zero_point.cuda().type_as(w) return quantize(w, scale, zero_point), scale, zero_point def emulate_int8_tensor(w, scale=None, zero_point=None): if scale is None: obs = torch.quantization.observer.MinMaxObserver() _ = obs(w) scale, zero_point = obs.calculate_qparams() scale = scale.cuda().type_as(w) zero_point = zero_point.cuda().type_as(w) return quantize(w, scale, zero_point), scale, zero_point
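

if __name__ == "__main__":
    # Illustrative call with an assumed toy tensor. A CUDA device is required,
    # since the observers above move scale/zero_point to the GPU.
    w = torch.randn(64, 64).cuda()
    w_q, scale, zero_point = emulate_int(w, bits=8, method="histogram")
    print("scale:", scale.item(), "zero_point:", zero_point.item())
    print("max abs quantization error:", (w - w_q).abs().max().item())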
1,669
33.791667
90
py
RegularizedBN
RegularizedBN-main/fairseq/modules/quantization/scalar/modules/qlinear.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from ..ops import emulate_int class IntLinear(nn.Module): """ Quantized counterpart of the nn.Linear module that applies QuantNoise during training. Args: - in_features: input features - out_features: output features - bias: bias or not - p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights) - bits: number of bits - method: choose among {"tensor", "histogram", "channel"} - update_step: recompute scale and zero_point every update_steps iterations Remarks: - We use the straight-through estimator so that the gradients back-propagate nicely in the network, this is implemented with the detach() trick. - Parameters scale and zero_point are recomputed every update_step forward pass to reduce the overhead - At test time, the weights are fully quantized """ def __init__( self, in_features, out_features, bias=True, p=0, update_step=3000, bits=8, method="histogram", ): super(IntLinear, self).__init__() self.in_features = int(in_features) self.out_features = int(out_features) self.weight = torch.nn.Parameter(torch.Tensor(out_features, in_features)) self.chosen_bias = bias if self.chosen_bias: self.bias = torch.nn.Parameter(torch.Tensor(out_features)) else: self.register_parameter("bias", None) self.reset_parameters() # quantization parameters self.p = p self.bits = bits self.method = method self.update_step = update_step self.counter = 0 def reset_parameters(self): nn.init.xavier_uniform_(self.weight) if self.chosen_bias: nn.init.constant_(self.bias, 0.0) return def forward(self, input): # train with QuantNoise and evaluate the fully quantized network p = self.p if self.training else 1 # update parameters every 100 iterations if self.counter % self.update_step == 0: self.scale = None self.zero_point = None self.counter += 1 # quantize weight weight_quantized, self.scale, self.zero_point = emulate_int( self.weight.detach(), bits=self.bits, method=self.method, scale=self.scale, zero_point=self.zero_point, ) # mask to apply noise mask = torch.zeros_like(self.weight) mask.bernoulli_(1 - p) noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0) # using straight-through estimator (STE) clamp_low = - self.scale * self.zero_point clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point) weight = torch.clamp(self.weight, clamp_low.item(), clamp_high.item()) + noise.detach() # return output output = F.linear(input, weight, self.bias) return output def extra_repr(self): return "in_features={}, out_features={}, bias={}, quant_noise={}, bits={}, method={}".format( self.in_features, self.out_features, self.bias is not None, self.p, self.bits, self.method, )
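

if __name__ == "__main__":
    # Illustrative usage with assumed toy sizes: with p=0.5, roughly half of the
    # weights see quantization noise during training. A CUDA device is assumed,
    # since the histogram observer places scale/zero_point on the GPU.
    layer = IntLinear(16, 8, bias=True, p=0.5, bits=8, method="histogram").cuda()
    y = layer(torch.randn(4, 16).cuda())
    print(y.shape)  # torch.Size([4, 8])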
3,596
31.405405
101
py
RegularizedBN
RegularizedBN-main/fairseq/modules/quantization/scalar/modules/qconv.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn.functional as F
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.utils import _pair

from ..ops import emulate_int


class IntConv2d(_ConvNd):
    """
    Quantized counterpart of the nn.Conv2d module that applies QuantNoise during training.

    Args:
        - standard nn.Conv2d parameters
        - p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights)
        - bits: number of bits
        - method: choose among {"tensor", "histogram", "channel"}
        - update_step: recompute scale and zero_point every update_step iterations

    Remarks:
        - We use the straight-through estimator so that the gradients
          back-propagate nicely in the network, this is implemented with
          the detach() trick
        - Parameters scale and zero_point are recomputed every update_step
          forward pass to reduce the overhead
        - At test time, the weights are fully quantized
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        bias=True,
        padding_mode="zeros",
        p=0,
        bits=8,
        method="histogram",
        update_step=1000,
    ):
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        super(IntConv2d, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            False,
            _pair(0),
            groups,
            bias,
            padding_mode,
        )

        # quantization parameters
        self.p = p
        self.bits = bits
        self.method = method
        self.update_step = update_step
        self.counter = 0

    def _conv_forward(self, input, weight):
        if self.padding_mode != "zeros":
            return F.conv2d(
                F.pad(input, self._padding_repeated_twice, mode=self.padding_mode),
                weight,
                self.bias,
                self.stride,
                _pair(0),
                self.dilation,
                self.groups,
            )
        return F.conv2d(
            input,
            weight,
            self.bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
        )

    def forward(self, input):
        # train with QuantNoise and evaluate the fully quantized network
        p = self.p if self.training else 1

        # recompute scale and zero_point every update_step iterations
        if self.counter % self.update_step == 0:
            self.scale = None
            self.zero_point = None
        self.counter += 1

        # quantize weight
        weight_quantized, self.scale, self.zero_point = emulate_int(
            self.weight.detach(),
            bits=self.bits,
            method=self.method,
            scale=self.scale,
            zero_point=self.zero_point,
        )

        # mask to apply noise
        mask = torch.zeros_like(self.weight)
        mask.bernoulli_(1 - p)
        noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0)

        # using straight-through estimator (STE)
        clamp_low = - self.scale * self.zero_point
        clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point)
        weight = torch.clamp(self.weight, clamp_low.item(), clamp_high.item()) + noise.detach()

        # return output
        output = self._conv_forward(input, weight)
        return output

    def extra_repr(self):
        return (
            "in_channels={}, out_channels={}, kernel_size={}, stride={}, "
            "padding={}, dilation={}, groups={}, bias={}, quant_noise={}, "
            "bits={}, method={}".format(
                self.in_channels,
                self.out_channels,
                self.kernel_size,
                self.stride,
                self.padding,
                self.dilation,
                self.groups,
                self.bias is not None,
                self.p,
                self.bits,
                self.method,
            )
        )
4,415
29.040816
95
py
RegularizedBN
RegularizedBN-main/fairseq/modules/quantization/scalar/modules/qemb.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from ..ops import emulate_int class IntEmbedding(nn.Module): """ Quantized counterpart of the nn.Embedding module that applies QuantNoise during training. Args: - num_embeddings: number of tokens - embedding_dim: embedding dimension - p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights) - bits: number of bits - method: choose among {"tensor", "histogram", "channel"} - update_step: recompute scale and zero_point every update_steps iterations Remarks: - We use the straight-through estimator so that the gradients back-propagate nicely in the network, this is implemented with the detach() trick - Parameters scale and zero_point are recomputed every update_step forward pass to reduce the overhead - At test time, the weights are fully quantized """ def __init__( self, num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2., scale_grad_by_freq=False, sparse=False, _weight=None, p=0, update_step=1000, bits=8, method="histogram", ): super(IntEmbedding, self).__init__() self.num_embeddings = num_embeddings self.embedding_dim = embedding_dim if padding_idx is not None: if padding_idx > 0: assert padding_idx < self.num_embeddings, 'Padding_idx must be within num_embeddings' elif padding_idx < 0: assert padding_idx >= -self.num_embeddings, 'Padding_idx must be within num_embeddings' padding_idx = self.num_embeddings + padding_idx self.padding_idx = padding_idx self.max_norm = max_norm self.norm_type = norm_type self.scale_grad_by_freq = scale_grad_by_freq if _weight is None: self.weight = nn.Parameter(torch.Tensor(num_embeddings, embedding_dim)) self.reset_parameters() else: assert list(_weight.shape) == [num_embeddings, embedding_dim], \ 'Shape of weight does not match num_embeddings and embedding_dim' self.weight = nn.Parameter(_weight) self.sparse = sparse # quantization parameters self.p = p self.bits = bits self.method = method self.update_step = update_step self.counter = 0 def reset_parameters(self): nn.init.normal_(self.weight) if self.padding_idx is not None: with torch.no_grad(): self.weight[self.padding_idx].fill_(0) def forward(self, input): # train with QuantNoise and evaluate the fully quantized network p = self.p if self.training else 1 # update parameters every 1000 iterations if self.counter % self.update_step == 0: self.scale = None self.zero_point = None self.counter += 1 # quantize weight weight_quantized, self.scale, self.zero_point = emulate_int( self.weight.detach(), bits=self.bits, method=self.method, scale=self.scale, zero_point=self.zero_point, ) # mask to apply noise mask = torch.zeros_like(self.weight) mask.bernoulli_(1 - p) noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0) # using straight-through estimator (STE) clamp_low = - self.scale * self.zero_point clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point) weight = torch.clamp(self.weight, clamp_low.item(), clamp_high.item()) + noise.detach() # return output output = F.embedding( input, weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse) return output def extra_repr(self): s = '{num_embeddings}, {embedding_dim}' if self.padding_idx is not None: s += ', padding_idx={padding_idx}' if self.max_norm is not None: s += ', max_norm={max_norm}' if self.norm_type != 2: s 
+= ', norm_type={norm_type}'
        if self.scale_grad_by_freq is not False:
            s += ', scale_grad_by_freq={scale_grad_by_freq}'
        if self.sparse is not False:
            s += ', sparse=True'
        s += ', quant_noise={p}, bits={bits}, method={method}'
        return s.format(**self.__dict__)
4,771
34.879699
103
py
RegularizedBN
RegularizedBN-main/fairseq/modules/quantization/scalar/modules/qact.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from ..ops import emulate_int class ActivationQuantizer: """ Fake scalar quantization of the activations using a forward hook. Args: - module. a nn.Module for which we quantize the *post-activations* - p: proportion of activations to quantize, set by default to 1 - update_step: to recompute quantization parameters - bits: number of bits for quantization - method: choose among {"tensor", "histogram", "channel"} - clamp_threshold: to prevent gradients overflow Remarks: - Parameters scale and zero_point are recomputed every update_step forward pass to reduce the overhead - For the list of quantization methods and number of bits, see ops.py - To remove the hook from the module, simply call self.handle.remove() - At test time, the activations are fully quantized - We use the straight-through estimator so that the gradients back-propagate nicely in the network, this is implemented with the detach() trick - The activations are hard-clamped in [-clamp_threshold, clamp_threshold] to prevent overflow during the backward pass """ def __init__(self, module, p=1, update_step=1000, bits=8, method="histogram", clamp_threshold=5): self.module = module self.p = p self.update_step = update_step self.counter = 0 self.bits = bits self.method = method self.clamp_threshold = clamp_threshold self.handle = None self.register_hook() def register_hook(self): # forward hook def quantize_hook(module, x, y): # update parameters every 1000 iterations if self.counter % self.update_step == 0: self.scale = None self.zero_point = None self.counter += 1 # train with QuantNoise and evaluate the fully quantized network p = self.p if self.module.training else 1 # quantize activations y_q, self.scale, self.zero_point = emulate_int( y.detach(), bits=self.bits, method=self.method, scale=self.scale, zero_point=self.zero_point, ) # mask to apply noise mask = torch.zeros_like(y) mask.bernoulli_(1 - p) noise = (y_q - y).masked_fill(mask.bool(), 0) # using straight-through estimator (STE) clamp_low = - self.scale * self.zero_point clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point) return torch.clamp(y, clamp_low.item(), clamp_high.item()) + noise.detach() # register hook self.handle = self.module.register_forward_hook(quantize_hook)
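

if __name__ == "__main__":
    # Illustrative sketch on an assumed toy module: attach the quantizer, run
    # forward passes elsewhere, then remove the hook when it is no longer needed.
    # Forward passes assume a CUDA device, since emulate_int's observers place
    # scale/zero_point on the GPU.
    import torch.nn as nn

    layer = nn.Linear(16, 8)
    quantizer = ActivationQuantizer(layer, p=1, bits=8, method="histogram")
    # ... forward passes through `layer` now produce fake-quantized activations ...
    quantizer.handle.remove()  # detach the forward hook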
3,033
36.45679
87
py
RegularizedBN
RegularizedBN-main/fairseq/modules/lightconv_layer/setup.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from setuptools import setup from torch.utils.cpp_extension import CUDAExtension, BuildExtension setup( name='lightconv_layer', ext_modules=[ CUDAExtension('lightconv_cuda', [ 'lightconv_cuda.cpp', 'lightconv_cuda_kernel.cu', ]), ], cmdclass={ 'build_ext': BuildExtension })
545
25
67
py
RegularizedBN
RegularizedBN-main/fairseq/modules/lightconv_layer/lightconv_layer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from torch import nn from torch.autograd import Function import torch.nn.functional as F import lightconv_cuda from fairseq import utils from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.modules.fairseq_dropout import FairseqDropout class lightconvFunction(Function): @staticmethod def forward(ctx, x, weights, padding_l): ctx.padding_l = padding_l outputs = lightconv_cuda.forward(x, weights, padding_l) variables = [x, weights] ctx.save_for_backward(*variables) return outputs[0] @staticmethod def backward(ctx, grad_output): outputs = lightconv_cuda.backward( grad_output.contiguous(), ctx.padding_l, *ctx.saved_tensors) grad_input, grad_weights = outputs return grad_input, grad_weights, None @with_incremental_state class LightconvLayer(nn.Module): def __init__( self, input_size, kernel_size=1, padding_l=None, weight_softmax=False, num_heads=1, weight_dropout=0., bias=False, ): super(LightconvLayer, self).__init__() self.input_size = input_size self.kernel_size = kernel_size self.padding_l = padding_l self.num_heads = num_heads self.weight_softmax = weight_softmax self.weight_dropout_module = FairseqDropout(weight_dropout, module_name=self.__class__.__name__) self.weight = nn.Parameter(torch.Tensor(num_heads, kernel_size)) if bias: self.bias = nn.Parameter(torch.Tensor(input_size)) else: self.bias = None self.reset_parameters() def upgrade_state_dict_named(self, state_dict, name): prefix = name + '.' if name != '' else '' for k, v in state_dict.items(): if k.endswith(prefix + 'weight'): if v.dim() == 3 and v.size(1) == 1: state_dict[k] = v.squeeze(1) def reset_parameters(self): nn.init.xavier_uniform_(self.weight) if self.bias is not None: nn.init.constant_(self.bias, 0.) 
def forward(self, x, incremental_state=None): # during inference time, incremental BMM is faster if incremental_state is not None: T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H input_buffer = self._get_input_buffer(incremental_state) if input_buffer is None: input_buffer = x.new() x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3) if self.kernel_size > 1: self._set_input_buffer(incremental_state, x_unfold[:, :, :, -self.kernel_size+1:]) x_unfold = x_unfold.view(T*B*H, R, -1) weight = self.weight if self.weight_softmax: weight = F.softmax(weight.float(), dim=1).type_as(weight) weight = weight[:, -x_unfold.size(2):] K = weight.size(1) weight = weight.view(1, H, K).expand(T*B, H, K).contiguous().view(T*B*H, K, 1) weight = self.weight_dropout_module(weight) output = torch.bmm(x_unfold, weight) # T*B*H x R x 1 output = output.view(T, B, C) return output # during training time, use CUDA kernel else: x = x.permute(1, 2, 0).contiguous() weight = self.weight if self.weight_softmax: weight = F.softmax(self.weight, -1) if self.weight_dropout_module.p: weight = self.weight_dropout_module(weight) return lightconvFunction.apply(x, weight, self.padding_l).permute(2, 0, 1) def reorder_incremental_state(self, incremental_state, new_order): input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: input_buffer = input_buffer.index_select(1, new_order) self._set_input_buffer(incremental_state, input_buffer) def _get_input_buffer(self, incremental_state): return utils.get_incremental_state(self, incremental_state, 'input_buffer') def _set_input_buffer(self, incremental_state, new_buffer): return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer) def half(self): return self._apply(lambda t: t.half() if t.is_floating_point() else t)
4,679
35
104
py
RegularizedBN
RegularizedBN-main/fairseq/logging/progress_bar.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Wrapper around various loggers and progress bars (e.g., tqdm). """ import atexit import json import logging import os import sys from collections import OrderedDict from contextlib import contextmanager from numbers import Number from typing import Optional import torch from .meters import AverageMeter, StopwatchMeter, TimeMeter logger = logging.getLogger(__name__) def progress_bar( iterator, log_format: Optional[str] = None, log_interval: int = 100, epoch: Optional[int] = None, prefix: Optional[str] = None, tensorboard_logdir: Optional[str] = None, default_log_format: str = 'tqdm', ): if log_format is None: log_format = default_log_format if log_format == 'tqdm' and not sys.stderr.isatty(): log_format = 'simple' if log_format == 'json': bar = JsonProgressBar(iterator, epoch, prefix, log_interval) elif log_format == 'none': bar = NoopProgressBar(iterator, epoch, prefix) elif log_format == 'simple': bar = SimpleProgressBar(iterator, epoch, prefix, log_interval) elif log_format == 'tqdm': bar = TqdmProgressBar(iterator, epoch, prefix) else: raise ValueError('Unknown log format: {}'.format(log_format)) if tensorboard_logdir: try: # [FB only] custom wrapper for TensorBoard import palaas # noqa from .fb_tbmf_wrapper import FbTbmfWrapper bar = FbTbmfWrapper(bar, log_interval) except ImportError: bar = TensorboardProgressBarWrapper(bar, tensorboard_logdir) return bar def build_progress_bar( args, iterator, epoch: Optional[int] = None, prefix: Optional[str] = None, default: str = 'tqdm', no_progress_bar: str = 'none', ): """Legacy wrapper that takes an argparse.Namespace.""" if getattr(args, 'no_progress_bar', False): default = no_progress_bar if getattr(args, 'distributed_rank', 0) == 0: tensorboard_logdir = getattr(args, 'tensorboard_logdir', None) else: tensorboard_logdir = None return progress_bar( iterator, log_format=args.log_format, log_interval=args.log_interval, epoch=epoch, prefix=prefix, tensorboard_logdir=tensorboard_logdir, default_log_format=default, ) def format_stat(stat): if isinstance(stat, Number): stat = '{:g}'.format(stat) elif isinstance(stat, AverageMeter): stat = '{:.3f}'.format(stat.avg) elif isinstance(stat, TimeMeter): stat = '{:g}'.format(round(stat.avg)) elif isinstance(stat, StopwatchMeter): stat = '{:g}'.format(round(stat.sum)) elif torch.is_tensor(stat): stat = stat.tolist() return stat class BaseProgressBar(object): """Abstract class for progress bars.""" def __init__(self, iterable, epoch=None, prefix=None): self.iterable = iterable self.n = getattr(iterable, 'n', 0) self.epoch = epoch self.prefix = '' if epoch is not None: self.prefix += 'epoch {:03d}'.format(epoch) if prefix is not None: self.prefix += ' | {}'.format(prefix) def __len__(self): return len(self.iterable) def __enter__(self): return self def __exit__(self, *exc): return False def __iter__(self): raise NotImplementedError def log(self, stats, tag=None, step=None): """Log intermediate stats according to log_interval.""" raise NotImplementedError def print(self, stats, tag=None, step=None): """Print end-of-epoch stats.""" raise NotImplementedError def _str_commas(self, stats): return ', '.join(key + '=' + stats[key].strip() for key in stats.keys()) def _str_pipes(self, stats): return ' | '.join(key + ' ' + stats[key].strip() for key in stats.keys()) def _format_stats(self, stats): postfix = OrderedDict(stats) # Preprocess 
stats according to datatype for key in postfix.keys(): postfix[key] = str(format_stat(postfix[key])) return postfix @contextmanager def rename_logger(logger, new_name): old_name = logger.name if new_name is not None: logger.name = new_name yield logger logger.name = old_name class JsonProgressBar(BaseProgressBar): """Log output in JSON format.""" def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000): super().__init__(iterable, epoch, prefix) self.log_interval = log_interval self.i = None self.size = None def __iter__(self): self.size = len(self.iterable) for i, obj in enumerate(self.iterable, start=self.n): self.i = i yield obj def log(self, stats, tag=None, step=None): """Log intermediate stats according to log_interval.""" step = step or self.i or 0 if ( step > 0 and self.log_interval is not None and step % self.log_interval == 0 ): update = ( self.epoch - 1 + (self.i + 1) / float(self.size) if self.epoch is not None else None ) stats = self._format_stats(stats, epoch=self.epoch, update=update) with rename_logger(logger, tag): logger.info(json.dumps(stats)) def print(self, stats, tag=None, step=None): """Print end-of-epoch stats.""" self.stats = stats if tag is not None: self.stats = OrderedDict([(tag + '_' + k, v) for k, v in self.stats.items()]) stats = self._format_stats(self.stats, epoch=self.epoch) with rename_logger(logger, tag): logger.info(json.dumps(stats)) def _format_stats(self, stats, epoch=None, update=None): postfix = OrderedDict() if epoch is not None: postfix['epoch'] = epoch if update is not None: postfix['update'] = round(update, 3) # Preprocess stats according to datatype for key in stats.keys(): postfix[key] = format_stat(stats[key]) return postfix class NoopProgressBar(BaseProgressBar): """No logging.""" def __init__(self, iterable, epoch=None, prefix=None): super().__init__(iterable, epoch, prefix) def __iter__(self): for obj in self.iterable: yield obj def log(self, stats, tag=None, step=None): """Log intermediate stats according to log_interval.""" pass def print(self, stats, tag=None, step=None): """Print end-of-epoch stats.""" pass class SimpleProgressBar(BaseProgressBar): """A minimal logger for non-TTY environments.""" def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000): super().__init__(iterable, epoch, prefix) self.log_interval = log_interval self.i = None self.size = None def __iter__(self): self.size = len(self.iterable) for i, obj in enumerate(self.iterable, start=self.n): self.i = i yield obj def log(self, stats, tag=None, step=None): """Log intermediate stats according to log_interval.""" step = step or self.i or 0 if ( step > 0 and self.log_interval is not None and step % self.log_interval == 0 ): stats = self._format_stats(stats) postfix = self._str_commas(stats) with rename_logger(logger, tag): logger.info( '{}: {:5d} / {:d} {}' .format(self.prefix, self.i + 1, self.size, postfix) ) def print(self, stats, tag=None, step=None): """Print end-of-epoch stats.""" postfix = self._str_pipes(self._format_stats(stats)) with rename_logger(logger, tag): logger.info('{} | {}'.format(self.prefix, postfix)) class TqdmProgressBar(BaseProgressBar): """Log to tqdm.""" def __init__(self, iterable, epoch=None, prefix=None): super().__init__(iterable, epoch, prefix) from tqdm import tqdm self.tqdm = tqdm( iterable, self.prefix, leave=False, disable=(logger.getEffectiveLevel() > logging.INFO), ) def __iter__(self): return iter(self.tqdm) def log(self, stats, tag=None, step=None): """Log intermediate stats according to 
log_interval.""" self.tqdm.set_postfix(self._format_stats(stats), refresh=False) def print(self, stats, tag=None, step=None): """Print end-of-epoch stats.""" postfix = self._str_pipes(self._format_stats(stats)) with rename_logger(logger, tag): logger.info('{} | {}'.format(self.prefix, postfix)) try: _tensorboard_writers = {} from tensorboardX import SummaryWriter except ImportError: SummaryWriter = None def _close_writers(): for w in _tensorboard_writers.values(): w.close() atexit.register(_close_writers) class TensorboardProgressBarWrapper(BaseProgressBar): """Log to tensorboard.""" def __init__(self, wrapped_bar, tensorboard_logdir): self.wrapped_bar = wrapped_bar self.tensorboard_logdir = tensorboard_logdir if SummaryWriter is None: logger.warning( "tensorboard not found, please install with: pip install tensorboardX" ) def _writer(self, key): if SummaryWriter is None: return None _writers = _tensorboard_writers if key not in _writers: _writers[key] = SummaryWriter(os.path.join(self.tensorboard_logdir, key)) _writers[key].add_text('sys.argv', " ".join(sys.argv)) return _writers[key] def __iter__(self): return iter(self.wrapped_bar) def log(self, stats, tag=None, step=None): """Log intermediate stats to tensorboard.""" self._log_to_tensorboard(stats, tag, step) self.wrapped_bar.log(stats, tag=tag, step=step) def print(self, stats, tag=None, step=None): """Print end-of-epoch stats.""" self._log_to_tensorboard(stats, tag, step) self.wrapped_bar.print(stats, tag=tag, step=step) def _log_to_tensorboard(self, stats, tag=None, step=None): writer = self._writer(tag or '') if writer is None: return if step is None: step = stats['num_updates'] for key in stats.keys() - {'num_updates'}: if isinstance(stats[key], AverageMeter): writer.add_scalar(key, stats[key].val, step) elif isinstance(stats[key], Number): writer.add_scalar(key, stats[key], step) writer.flush()
11,082
29.786111
89
py
RegularizedBN
RegularizedBN-main/fairseq/logging/metrics.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ A standalone module for aggregating metrics. Metrics can be logged from anywhere using the `log_*` functions defined in this module. The logged values will be aggregated dynamically based on the aggregation context in which the logging occurs. See the :func:`aggregate` context manager for more details. """ from collections import defaultdict, OrderedDict import contextlib import time from typing import Callable, Dict, List, Optional import uuid from .meters import * # Aggregation contexts are considered "active" when inside the scope # created by the :func:`aggregate` context manager. _aggregators = OrderedDict() _active_aggregators = OrderedDict() _active_aggregators_cnt = defaultdict(lambda: 0) def reset() -> None: """Reset all metrics aggregators.""" _aggregators.clear() _active_aggregators.clear() _active_aggregators_cnt.clear() # The "default" aggregator observes all logged values. _aggregators["default"] = MetersDict() _active_aggregators["default"] = _aggregators["default"] _active_aggregators_cnt["default"] = 1 reset() @contextlib.contextmanager def aggregate(name: Optional[str] = None, new_root: bool = False): """Context manager to aggregate metrics under a given name. Aggregations can be nested. If *new_root* is ``False``, then logged metrics will be recorded along the entire stack of nested aggregators, including a global "default" aggregator. If *new_root* is ``True``, then this aggregator will be the root of a new aggregation stack, thus bypassing any parent aggregators. Note that aggregation contexts are uniquely identified by their *name* (e.g., train, valid). Creating a context with an existing name will reuse the corresponding :class:`MetersDict` instance. If no name is given, then a temporary aggregator will be created. Usage:: with metrics.aggregate("train"): for step, batch in enumerate(epoch): with metrics.aggregate("train_inner") as agg: metrics.log_scalar("loss", get_loss(batch)) if step % log_interval == 0: print(agg.get_smoothed_value("loss")) agg.reset() print(metrics.get_smoothed_values("train")["loss"]) Args: name (str): name of the aggregation. Defaults to a random/temporary name if not given explicitly. new_root (bool): make this aggregation the root of a new aggregation stack. """ if name is None: # generate a temporary name name = str(uuid.uuid4()) assert name not in _aggregators agg = MetersDict() else: assert name != "default" agg = _aggregators.setdefault(name, MetersDict()) if new_root: backup_aggregators = _active_aggregators.copy() _active_aggregators.clear() backup_aggregators_cnt = _active_aggregators_cnt.copy() _active_aggregators_cnt.clear() _active_aggregators[name] = agg _active_aggregators_cnt[name] += 1 yield agg _active_aggregators_cnt[name] -= 1 if _active_aggregators_cnt[name] == 0 and name in _active_aggregators: del _active_aggregators[name] if new_root: _active_aggregators.clear() _active_aggregators.update(backup_aggregators) _active_aggregators_cnt.clear() _active_aggregators_cnt.update(backup_aggregators_cnt) def get_active_aggregators() -> List[MetersDict]: return list(_active_aggregators.values()) def log_scalar( key: str, value: float, weight: float = 1, priority: int = 10, round: Optional[int] = None, ): """Log a scalar value. 
Args: key (str): name of the field to log value (float): value to log weight (float): weight that this value contributes to the average. A weight of 0 will always log the latest value. priority (int): smaller values are logged earlier in the output round (Optional[int]): number of digits to round to when displaying """ for agg in get_active_aggregators(): if key not in agg: agg.add_meter(key, AverageMeter(round=round), priority) agg[key].update(value, weight) def log_derived(key: str, fn: Callable[[MetersDict], float], priority: int = 20): """Log a scalar value derived from other meters. Args: key (str): name of the field to log fn (Callable[[MetersDict], float]): function that takes a single argument *meters* and returns the derived value priority (int): smaller values are logged earlier in the output """ for agg in get_active_aggregators(): if key not in agg: agg.add_meter(key, MetersDict._DerivedMeter(fn), priority) def log_speed( key: str, value: float, priority: int = 30, round: Optional[int] = None, ): """Log the rate of some quantity per second. Args: key (str): name of the field to log value (float): value to log priority (int): smaller values are logged earlier in the output round (Optional[int]): number of digits to round to when displaying """ for agg in get_active_aggregators(): if key not in agg: agg.add_meter(key, TimeMeter(round=round), priority) agg[key].reset() # reset meter on the first call else: agg[key].update(value) def log_start_time(key: str, priority: int = 40, round: Optional[int] = None): """Log the duration of some event in seconds. The duration will be computed once :func:`log_stop_time` is called. Args: key (str): name of the field to log priority (int): smaller values are logged earlier in the output round (Optional[int]): number of digits to round to when displaying """ for agg in get_active_aggregators(): if key not in agg: agg.add_meter(key, StopwatchMeter(round=round), priority) agg[key].start() def log_stop_time(key: str, weight: float = 0., prehook=None): """Log the duration of some event in seconds. The duration will be computed since :func:`log_start_time` was called. Set weight > 0 to report the average time instead of the sum. Args: key (str): name of the field to log weight (float): weight that this time contributes to the average prehook (function, no arguments): will be called before the timer is stopped. For example, use prehook=torch.cuda.synchronize to make sure all gpu operations are done before timer is stopped. """ for agg in get_active_aggregators(): if key in agg: agg[key].stop(weight, prehook) def log_custom( new_meter_fn: Callable[[], Meter], key: str, *args, priority: int = 50, **kwargs, ): """Log using a custom Meter. Any extra *args* or *kwargs* will be passed through to the Meter's *update* method. 
Args: new_meter_fn (Callable[[], Meter]): function that returns a new Meter instance key (str): name of the field to log priority (int): smaller values are logged earlier in the output """ for agg in get_active_aggregators(): if key not in agg: agg.add_meter(key, new_meter_fn(), priority) agg[key].update(*args, **kwargs) def reset_meter(name: str, key: str) -> None: """Reset Meter instance aggregated under a given *name* and *key*.""" meter = get_meter(name, key) if meter is not None: meter.reset() def reset_meters(name: str) -> None: """Reset Meter instances aggregated under a given *name*.""" meters = get_meters(name) if meters is not None: meters.reset() def get_meter(name: str, key: str) -> Meter: """Get a single Meter instance aggregated under *name* and *key*. Returns: Meter or None if no metrics have been logged under *name* and *key*. """ if name not in _aggregators: return None return _aggregators[name].get(key, None) def get_meters(name: str) -> MetersDict: """Get Meter instances aggregated under a given *name*. Returns: MetersDict or None if no metrics have been logged under *name*. """ return _aggregators.get(name, None) def get_smoothed_value(name: str, key: str) -> float: """Get a single smoothed value. Raises: KeyError: if no metrics have been logged under *name* and *key*. """ return _aggregators[name].get_smoothed_value(key) def get_smoothed_values(name: str) -> Dict[str, float]: """Get smoothed values aggregated under a given *name*. Raises: KeyError: if no metrics have been logged under *name*. """ return _aggregators[name].get_smoothed_values() def state_dict(): return OrderedDict([ (name, agg.state_dict()) for name, agg in _aggregators.items() ]) def load_state_dict(state_dict): for name, agg_state in state_dict.items(): _aggregators[name] = MetersDict() _aggregators[name].load_state_dict(agg_state)
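

if __name__ == "__main__":
    # Minimal sketch with toy values: metrics logged inside an aggregation
    # context are recorded both under that name and in the "default" aggregator.
    with aggregate("demo") as agg:
        log_scalar("loss", 2.0)
        log_scalar("loss", 1.0)
        print(agg.get_smoothed_value("loss"))     # 1.5
    print(get_smoothed_values("demo"))            # OrderedDict([('loss', 1.5)])
    print(get_smoothed_value("default", "loss"))  # 1.5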
9,325
30.938356
81
py
RegularizedBN
RegularizedBN-main/fairseq/logging/meters.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import bisect from collections import OrderedDict import time from typing import Dict, Optional try: import torch def type_as(a, b): if torch.is_tensor(a) and torch.is_tensor(b): return a.to(b) else: return a except ImportError: torch = None def type_as(a, b): return a try: import numpy as np except ImportError: np = None class Meter(object): """Base class for Meters.""" def __init__(self): pass def state_dict(self): return {} def load_state_dict(self, state_dict): pass def reset(self): raise NotImplementedError @property def smoothed_value(self) -> float: """Smoothed value used for logging.""" raise NotImplementedError def safe_round(number, ndigits): if hasattr(number, '__round__'): return round(number, ndigits) elif torch is not None and torch.is_tensor(number) and number.numel() == 1: return safe_round(number.item(), ndigits) elif np is not None and np.ndim(number) == 0 and hasattr(number, 'item'): return safe_round(number.item(), ndigits) else: return number class AverageMeter(Meter): """Computes and stores the average and current value""" def __init__(self, round: Optional[int] = None): self.round = round self.reset() def reset(self): self.val = None # most recent update self.sum = 0 # sum from all updates self.count = 0 # total n from all updates def update(self, val, n=1): if val is not None: self.val = val if n > 0: self.sum = type_as(self.sum, val) + (val * n) self.count = type_as(self.count, n) + n def state_dict(self): return { 'val': self.val, 'sum': self.sum, 'count': self.count, 'round': self.round, } def load_state_dict(self, state_dict): self.val = state_dict['val'] self.sum = state_dict['sum'] self.count = state_dict['count'] self.round = state_dict.get('round', None) @property def avg(self): return self.sum / self.count if self.count > 0 else self.val @property def smoothed_value(self) -> float: val = self.avg if self.round is not None and val is not None: val = safe_round(val, self.round) return val class TimeMeter(Meter): """Computes the average occurrence of some event per second""" def __init__( self, init: int = 0, n: int = 0, round: Optional[int] = None, ): self.round = round self.reset(init, n) def reset(self, init=0, n=0): self.init = init self.start = time.perf_counter() self.n = n self.i = 0 def update(self, val=1): self.n = type_as(self.n, val) + val self.i += 1 def state_dict(self): return { 'init': self.elapsed_time, 'n': self.n, 'round': self.round, } def load_state_dict(self, state_dict): if 'start' in state_dict: # backwards compatibility for old state_dicts self.reset(init=state_dict['init']) else: self.reset(init=state_dict['init'], n=state_dict['n']) self.round = state_dict.get('round', None) @property def avg(self): return self.n / self.elapsed_time @property def elapsed_time(self): return self.init + (time.perf_counter() - self.start) @property def smoothed_value(self) -> float: val = self.avg if self.round is not None and val is not None: val = safe_round(val, self.round) return val class StopwatchMeter(Meter): """Computes the sum/avg duration of some event in seconds""" def __init__(self, round: Optional[int] = None): self.round = round self.sum = 0 self.n = 0 self.start_time = None def start(self): self.start_time = time.perf_counter() def stop(self, n=1, prehook=None): if self.start_time is not None: if prehook is not None: prehook() delta = time.perf_counter() - 
self.start_time self.sum = self.sum + delta self.n = type_as(self.n, n) + n def reset(self): self.sum = 0 # cumulative time during which stopwatch was active self.n = 0 # total n across all start/stop self.start() def state_dict(self): return { 'sum': self.sum, 'n': self.n, 'round': self.round, } def load_state_dict(self, state_dict): self.sum = state_dict['sum'] self.n = state_dict['n'] self.start_time = None self.round = state_dict.get('round', None) @property def avg(self): return self.sum / self.n if self.n > 0 else self.sum @property def elapsed_time(self): if self.start_time is None: return 0. return time.perf_counter() - self.start_time @property def smoothed_value(self) -> float: val = self.avg if self.sum > 0 else self.elapsed_time if self.round is not None and val is not None: val = safe_round(val, self.round) return val class MetersDict(OrderedDict): """A sorted dictionary of :class:`Meters`. Meters are sorted according to a priority that is given when the meter is first added to the dictionary. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.priorities = [] def __setitem__(self, key, value): assert key not in self, "MetersDict doesn't support reassignment" priority, value = value bisect.insort(self.priorities, (priority, len(self.priorities), key)) super().__setitem__(key, value) for _, _, key in self.priorities: # reorder dict to match priorities self.move_to_end(key) def add_meter(self, key, meter, priority): self.__setitem__(key, (priority, meter)) def state_dict(self): return [ (pri, key, self[key].__class__.__name__, self[key].state_dict()) for pri, _, key in self.priorities # can't serialize DerivedMeter instances if not isinstance(self[key], MetersDict._DerivedMeter) ] def load_state_dict(self, state_dict): self.clear() self.priorities.clear() for pri, key, meter_cls, meter_state in state_dict: meter = globals()[meter_cls]() meter.load_state_dict(meter_state) self.add_meter(key, meter, pri) def get_smoothed_value(self, key: str) -> float: """Get a single smoothed value.""" meter = self[key] if isinstance(meter, MetersDict._DerivedMeter): return meter.fn(self) else: return meter.smoothed_value def get_smoothed_values(self) -> Dict[str, float]: """Get all smoothed values.""" return OrderedDict([ (key, self.get_smoothed_value(key)) for key in self.keys() if not key.startswith("_") ]) def reset(self): """Reset Meter instances.""" for meter in self.values(): if isinstance(meter, MetersDict._DerivedMeter): continue meter.reset() class _DerivedMeter(Meter): """A Meter whose values are derived from other Meters.""" def __init__(self, fn): self.fn = fn def reset(self): pass
7,885
26.477352
79
py
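A minimal usage sketch for the meters defined above, assuming the module is importable as fairseq.logging.meters; the meter names and numbers are illustrative only:

from fairseq.logging.meters import AverageMeter, StopwatchMeter, MetersDict

meters = MetersDict()
meters.add_meter("loss", AverageMeter(round=3), priority=10)   # lower priority sorts first
meters.add_meter("wall", StopwatchMeter(round=0), priority=20)

meters["wall"].start()
for batch_loss in [0.9, 0.7, 0.5]:
    meters["loss"].update(batch_loss, n=32)    # n = tokens (or sentences) in the batch
meters["wall"].stop()

print(meters.get_smoothed_values())  # e.g. OrderedDict([('loss', 0.7), ('wall', <seconds>)])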
RegularizedBN
RegularizedBN-main/fairseq/criterions/fairseq_criterion.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import inspect from typing import Any, Dict, List from torch.nn.modules.loss import _Loss from fairseq import metrics, utils class FairseqCriterion(_Loss): def __init__(self, task): super().__init__() self.task = task if hasattr(task, 'target_dictionary'): tgt_dict = task.target_dictionary self.padding_idx = tgt_dict.pad() if tgt_dict is not None else -100 @staticmethod def add_args(parser): """Add criterion-specific arguments to the parser.""" pass @classmethod def build_criterion(cls, args, task): """Construct a criterion from command-line args.""" # Criterions can override this, but for convenience we also try # to automatically map argparse.Namespace keys to corresponding # arguments in the __init__. init_args = {} for p in inspect.signature(cls).parameters.values(): if ( p.kind == p.POSITIONAL_ONLY or p.kind == p.VAR_POSITIONAL or p.kind == p.VAR_KEYWORD ): # we haven't implemented inference for these argument types, # but PRs welcome :) raise NotImplementedError('{} not supported'.format(p.kind)) assert p.kind in {p.POSITIONAL_OR_KEYWORD, p.KEYWORD_ONLY} if p.name == 'task': init_args['task'] = task elif hasattr(args, p.name): init_args[p.name] = getattr(args, p.name) elif p.default != p.empty: pass # we'll use the default value else: raise NotImplementedError( 'Unable to infer Criterion arguments, please implement ' '{}.build_criterion'.format(cls.__name__) ) return cls(**init_args) def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ raise NotImplementedError @staticmethod def aggregate_logging_outputs( logging_outputs: List[Dict[str, Any]], ) -> Dict[str, Any]: """Aggregate logging outputs from data parallel training.""" utils.deprecation_warning( 'The aggregate_logging_outputs API is deprecated. ' 'Please use the reduce_metrics API instead.' ) raise NotImplementedError @classmethod def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None: """Aggregate logging outputs from data parallel training.""" utils.deprecation_warning( 'Criterions should implement the reduce_metrics API. ' 'Falling back to deprecated aggregate_logging_outputs API.' ) agg_logging_outputs = cls.aggregate_logging_outputs(logging_outputs) for k, v in agg_logging_outputs.items(): if k in {'nsentences', 'ntokens', 'sample_size'}: continue metrics.log_scalar(k, v) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return False class LegacyFairseqCriterion(FairseqCriterion): def __init__(self, args, task): super().__init__(task=task) self.args = args utils.deprecation_warning( 'Criterions should take explicit arguments instead of an ' 'argparse.Namespace object, please update your criterion by ' 'extending FairseqCriterion instead of LegacyFairseqCriterion.' ) @classmethod def build_criterion(cls, args, task): """Construct a criterion from command-line args.""" return cls(args, task)
4,258
34.491667
79
py
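A small sketch of how the signature inspection in build_criterion above is meant to be used; the criterion name, its extra fields, and the task object are hypothetical:

from fairseq.criterions import FairseqCriterion, register_criterion

@register_criterion('toy_cross_entropy')
class ToyCriterion(FairseqCriterion):
    def __init__(self, task, sentence_avg, toy_margin=0.0):
        # build_criterion() fills sentence_avg / toy_margin automatically because the
        # parameter names match attributes on the argparse.Namespace.
        super().__init__(task)
        self.sentence_avg = sentence_avg
        self.toy_margin = toy_margin

# criterion = ToyCriterion.build_criterion(args, task)  # args needs .sentence_avg; toy_margin falls back to its default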
RegularizedBN
RegularizedBN-main/fairseq/criterions/nat_loss.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch.nn.functional as F import torch from torch import Tensor from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion @register_criterion("nat_loss") class LabelSmoothedDualImitationCriterion(FairseqCriterion): def __init__(self, task, label_smoothing): super().__init__(task) self.label_smoothing = label_smoothing @staticmethod def add_args(parser): """Add criterion-specific arguments to the parser.""" parser.add_argument( '--label-smoothing', default=0., type=float, metavar='D', help='epsilon for label smoothing, 0 means no label smoothing', ) def _compute_loss( self, outputs, targets, masks=None, label_smoothing=0.0, name="loss", factor=1.0 ): """ outputs: batch x len x d_model targets: batch x len masks: batch x len policy_logprob: if there is some policy depends on the likelihood score as rewards. """ def mean_ds(x: Tensor, dim=None) -> Tensor: return ( x.float().mean().type_as(x) if dim is None else x.float().mean(dim).type_as(x) ) if masks is not None: outputs, targets = outputs[masks], targets[masks] if masks is not None and not masks.any(): nll_loss = torch.tensor(0) loss = nll_loss else: logits = F.log_softmax(outputs, dim=-1) if targets.dim() == 1: losses = F.nll_loss(logits, targets.to(logits.device), reduction='none') else: # soft-labels losses = F.kl_div(logits, targets.to(logits.device), reduction='none') losses = losses.sum(-1) nll_loss = mean_ds(losses) if label_smoothing > 0: loss = nll_loss * ( 1 - label_smoothing) - mean_ds(logits) * label_smoothing else: loss = nll_loss loss = loss * factor return {"name": name, "loss": loss, "nll_loss": nll_loss, "factor": factor} def _custom_loss(self, loss, name="loss", factor=1.0): return {"name": name, "loss": loss, "factor": factor} def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. 
Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ nsentences, ntokens = sample["nsentences"], sample["ntokens"] # B x T src_tokens, src_lengths = ( sample["net_input"]["src_tokens"], sample["net_input"]["src_lengths"], ) tgt_tokens, prev_output_tokens = sample["target"], sample["prev_target"] outputs = model(src_tokens, src_lengths, prev_output_tokens, tgt_tokens) losses, nll_loss = [], [] for obj in outputs: if outputs[obj].get("loss", None) is None: _losses = self._compute_loss( outputs[obj].get("out"), outputs[obj].get("tgt"), outputs[obj].get("mask", None), outputs[obj].get("ls", 0.0), name=obj + '-loss', factor=outputs[obj].get("factor", 1.0) ) else: _losses = self._custom_loss( outputs[obj].get("loss"), name=obj + '-loss', factor=outputs[obj].get("factor", 1.0) ) losses += [_losses] if outputs[obj].get("nll_loss", False): nll_loss += [_losses.get("nll_loss", 0.0)] loss = sum(l["loss"] for l in losses) nll_loss = sum(l for l in nll_loss) if len(nll_loss) > 0 \ else loss.new_tensor(0) # NOTE: # we don't need to use sample_size as denominator for the gradient # here sample_size is just used for logging sample_size = 1 logging_output = { "loss": loss.data, "nll_loss": nll_loss.data, "ntokens": ntokens, "nsentences": nsentences, "sample_size": sample_size, } for l in losses: logging_output[l["name"]] = ( utils.item(l["loss"].data / l["factor"]) if reduce else l[["loss"]].data / l["factor"] ) return loss, sample_size, logging_output @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" sample_size = utils.item(sum(log.get("sample_size", 0) for log in logging_outputs)) loss = utils.item(sum(log.get("loss", 0) for log in logging_outputs)) nll_loss = utils.item(sum(log.get("nll_loss", 0) for log in logging_outputs)) metrics.log_scalar('loss', loss / sample_size / math.log(2), sample_size, round=3) metrics.log_scalar('nll_loss', nll_loss / sample_size / math.log(2), sample_size, round=3) metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['loss'].avg)) for key in logging_outputs[0]: if key[-5:] == "-loss": val = sum(log.get(key, 0) for log in logging_outputs) metrics.log_scalar( key[:-5], val / sample_size / math.log(2) if sample_size > 0 else 0.0, sample_size, round=3, ) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return True
6,238
34.856322
98
py
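A numeric sketch of the smoothed loss computed in _compute_loss above, i.e. loss = nll_loss * (1 - eps) - mean(log_softmax(outputs)) * eps; shapes and values here are invented:

import torch
import torch.nn.functional as F

outputs = torch.randn(4, 7)            # 4 unmasked positions, vocabulary of 7
targets = torch.randint(0, 7, (4,))
eps = 0.1

logits = F.log_softmax(outputs, dim=-1)
nll_loss = F.nll_loss(logits, targets, reduction='none').mean()
loss = nll_loss * (1 - eps) - logits.mean() * eps   # uniform label-smoothing term
print(nll_loss.item(), loss.item())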
RegularizedBN
RegularizedBN-main/fairseq/criterions/wav2vec_criterion.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion from fairseq.logging.meters import safe_round @register_criterion('wav2vec') class Wav2vecCriterion(FairseqCriterion): def __init__(self, task, infonce=False, loss_weights=None, log_keys=None): super().__init__(task) self.infonce = infonce self.loss_weights = None if loss_weights is None else eval(loss_weights) self.log_keys = [] if log_keys is None else eval(log_keys) @staticmethod def add_args(parser): """Add criterion-specific arguments to the parser.""" # fmt: off parser.add_argument('--infonce', action='store_true', help='if set, uses cross entropy instead of binary cross entropy (i.e. InfoNCE loss)') parser.add_argument('--loss-weights', type=str, default=None, help='weights for additional loss terms (not first one)') parser.add_argument('--log-keys', type=str, default=None, help='output keys to log') def forward(self, model, sample, reduce=True, log_pred=False): """Compute the loss for the given sample. Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ net_output = model(**sample['net_input']) logits = model.get_logits(net_output).float() target = model.get_targets(sample, net_output) weights = None if hasattr(model, 'get_target_weights') and not self.infonce: weights = model.get_target_weights(target, net_output) if torch.is_tensor(weights): weights = weights.float() losses = [] if self.infonce: loss = F.cross_entropy(logits, target, reduction="sum" if reduce else "none",) else: loss = F.binary_cross_entropy_with_logits(logits, target.float(), weights, reduction="sum" if reduce else "none",) sample_size = target.numel() if self.infonce else target.long().sum().item() losses.append(loss.detach().clone()) if self.loss_weights is not None: assert hasattr(model, "get_extra_losses") extra_losses = model.get_extra_losses(net_output) if torch.is_tensor(extra_losses): extra_losses = [extra_losses] if len(self.loss_weights) == 1 and len(extra_losses) != 1: self.loss_weights = [self.loss_weights[0]] * len(extra_losses) assert len(extra_losses) == len(self.loss_weights), f'{len(extra_losses)}, {len(self.loss_weights)}' for p, coef in zip(extra_losses, self.loss_weights): if coef != 0 and p is not None: p = coef * p.float() * sample_size loss += p losses.append(p) logging_output = { 'loss': loss.item() if reduce else loss, 'ntokens': sample_size, 'nsentences': sample['id'].numel(), 'sample_size': sample_size, } for lk in self.log_keys: if lk in net_output: logging_output[lk] = float((net_output[lk])) if len(losses) > 1: for i, l in enumerate(losses): logging_output[f'loss_{i}'] = l.item() if self.infonce: with torch.no_grad(): if logits.numel() == 0: corr = 0 count = 0 else: assert logits.dim() > 1, logits.shape max = logits.argmax(-1) == 0 min = logits.argmin(-1) == 0 both = max & min corr = max.long().sum().item() - both.long().sum().item() count = max.numel() logging_output["correct"] = corr logging_output["count"] = count if log_pred: logging_output['logits'] = logits.cpu().numpy() logging_output['target'] = target.cpu().numpy() return loss, sample_size, logging_output @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate 
logging outputs from data parallel training.""" loss_sum = utils.item(sum(log.get('loss', 0) for log in logging_outputs)) ntokens = utils.item(sum(log.get('ntokens', 0) for log in logging_outputs)) nsentences = utils.item(sum(log.get('nsentences', 0) for log in logging_outputs)) sample_size = utils.item(sum(log.get('sample_size', 0) for log in logging_outputs)) metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3) metrics.log_scalar('ntokens', ntokens) metrics.log_scalar('nsentences', nsentences) correct = sum(log.get("correct", 0) for log in logging_outputs) metrics.log_scalar("_correct", correct) total = sum(log.get("count", 0) for log in logging_outputs) metrics.log_scalar("_total", total) if total > 0: metrics.log_derived( "accuracy", lambda meters: safe_round(meters["_correct"].sum / meters["_total"].sum, 5) if meters["_total"].sum > 0 else float("nan"), ) builtin_keys = {'loss', 'ntokens', 'nsentences', 'sample_size', 'correct', 'count'} for k in logging_outputs[0]: if k not in builtin_keys: val = sum(log.get(k, 0) for log in logging_outputs) / len(logging_outputs) if k.startswith('loss'): metrics.log_scalar(k, val / sample_size / math.log(2), sample_size) else: metrics.log_scalar(k, val, round=3) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return False
6,437
39.490566
126
py
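A sketch of the InfoNCE accuracy bookkeeping above: index 0 holds the positive, so a prediction counts as correct when argmax is 0, and all-equal rows (argmax == argmin == 0) are subtracted out; the logits are made up:

import torch

logits = torch.tensor([[ 2.0, 0.1, -1.0],   # positive wins -> correct
                       [ 0.0, 0.0,  0.0],   # tie: argmax == argmin == 0, discounted
                       [-0.5, 1.5,  0.2]])  # a distractor wins -> wrong
is_max = logits.argmax(-1) == 0
is_min = logits.argmin(-1) == 0
correct = is_max.long().sum().item() - (is_max & is_min).long().sum().item()
print(correct, is_max.numel())   # 1 correct out of 3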
RegularizedBN
RegularizedBN-main/fairseq/criterions/legacy_masked_lm.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion def compute_cross_entropy_loss(logits, targets, ignore_index=-100): """ Function to compute the cross entropy loss. The default value of ignore_index is the same as the default value for F.cross_entropy in pytorch. """ assert logits.size(0) == targets.size(-1), \ "Logits and Targets tensor shapes don't match up" loss = F.nll_loss( F.log_softmax(logits, -1, dtype=torch.float32), targets, reduction="sum", ignore_index=ignore_index, ) return loss @register_criterion('legacy_masked_lm_loss') class LegacyMaskedLmLoss(FairseqCriterion): """ Implementation for the loss used in masked language model (MLM) training. This optionally also computes the next sentence prediction (NSP) loss and adds it to the overall loss based on the specified args. There are three cases to consider: 1) Generic MLM training without NSP loss. In this case sentence_targets and sentence_logits are both None. 2) BERT training without NSP loss. In this case sentence_targets is not None but sentence_logits is None and we should not be computing a sentence level loss. 3) BERT training with NSP loss. In this case both sentence_targets and sentence_logits are not None and we should be computing a sentence level loss. The weight of the sentence level loss is specified as an argument. """ def __init__(self, task, masked_lm_only, nsp_loss_weight): super().__init__(task) self.masked_lm_only = masked_lm_only self.nsp_loss_weight = nsp_loss_weight @staticmethod def add_args(parser): """Args for MaskedLM Loss""" # Default for masked_lm_only is False so as to not break BERT training parser.add_argument('--masked-lm-only', default=False, action='store_true', help='compute MLM loss only') parser.add_argument('--nsp-loss-weight', default=1.0, type=float, help='weight for next sentence prediction' ' loss (default 1)') def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ lm_logits, output_metadata = model(**sample["net_input"]) # reshape lm_logits from (N,T,C) to (N*T,C) lm_logits = lm_logits.view(-1, lm_logits.size(-1)) lm_targets = sample['lm_target'].view(-1) lm_loss = compute_cross_entropy_loss( lm_logits, lm_targets, self.padding_idx) # compute the number of tokens for which loss is computed. This is used # to normalize the loss ntokens = utils.strip_pad(lm_targets, self.padding_idx).numel() loss = lm_loss / ntokens nsentences = sample['nsentences'] # nsentences = 0 # Compute sentence loss if masked_lm_only is False sentence_loss = None if not self.masked_lm_only: sentence_logits = output_metadata['sentence_logits'] sentence_targets = sample['sentence_target'].view(-1) # This needs to be recomputed due to some differences between # TokenBlock and BlockPair dataset. This can be resolved with a # refactor of BERTModel which we will do in the future. # TODO: Remove this after refactor of BERTModel nsentences = sentence_targets.size(0) # Check for logits being none which can happen when remove_heads # is set to true in the BERT model. 
Ideally we should set # masked_lm_only to true in this case, but that requires some # refactor in the BERT model. if sentence_logits is not None: sentence_loss = compute_cross_entropy_loss( sentence_logits, sentence_targets) loss += self.nsp_loss_weight * (sentence_loss / nsentences) # NOTE: as we are summing up per token mlm loss and per sentence nsp loss # we don't need to use sample_size as denominator for the gradient # here sample_size is just used for logging sample_size = 1 logging_output = { 'loss': utils.item(loss.data) if reduce else loss.data, 'lm_loss': utils.item(lm_loss.data) if reduce else lm_loss.data, # sentence loss is not always computed 'sentence_loss': ( ( utils.item(sentence_loss.data) if reduce else sentence_loss.data ) if sentence_loss is not None else 0.0 ), 'ntokens': ntokens, 'nsentences': nsentences, 'sample_size': sample_size, } return loss, sample_size, logging_output @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" lm_loss_sum = sum(log.get('lm_loss', 0) for log in logging_outputs) sentence_loss_sum = sum( log.get('sentence_loss', 0) for log in logging_outputs) ntokens = sum(log.get('ntokens', 0) for log in logging_outputs) nsentences = sum(log.get('nsentences', 0) for log in logging_outputs) sample_size = sum(log.get('sample_size', 0) for log in logging_outputs) agg_loss = sum(log.get('loss', 0) for log in logging_outputs) metrics.log_scalar('loss', agg_loss / sample_size / math.log(2) if sample_size > 0 else 0., sample_size, round=3) metrics.log_scalar('lm_loss', lm_loss_sum / ntokens / math.log(2) if ntokens > 0 else 0., ntokens, round=3) metrics.log_scalar('sentence_loss', sentence_loss_sum / nsentences / math.log(2) if nsentences > 0 else 0., nsentences, round=3) metrics.log_scalar('nll_loss', lm_loss_sum / ntokens / math.log(2) if ntokens > 0 else 0., ntokens, round=3) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return True
6,769
43.248366
136
py
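A worked sketch of how the two terms above are combined (all numbers invented): the summed MLM loss is normalized by tokens, the NSP loss by sentences, and the latter is scaled by --nsp-loss-weight:

lm_loss, ntokens = 120.0, 48          # summed token-level MLM loss
sentence_loss, nsentences = 5.0, 8    # summed NSP loss
nsp_loss_weight = 1.0

loss = lm_loss / ntokens + nsp_loss_weight * (sentence_loss / nsentences)
print(loss)   # 2.5 + 0.625 = 3.125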
RegularizedBN
RegularizedBN-main/fairseq/criterions/adaptive_loss.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion @register_criterion('adaptive_loss') class AdaptiveLoss(FairseqCriterion): """This is an implementation of the loss function accompanying the adaptive softmax approximation for graphical processing units (GPU), described in the paper "Efficient softmax approximation for GPUs" (http://arxiv.org/abs/1609.04309).""" def __init__(self, task, sentence_avg): super().__init__(task) self.sentence_avg = sentence_avg @classmethod def build_criterion(cls, args, task): if getattr(args, 'ddp_backend', None) == 'c10d': raise Exception( 'AdaptiveLoss is not compatible with the c10d ' 'version of DistributedDataParallel. Please use ' '`--ddp-backend=no_c10d` instead.' ) return cls(task, args.sentence_avg) def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ assert hasattr(model.decoder, 'adaptive_softmax') and model.decoder.adaptive_softmax is not None adaptive_softmax = model.decoder.adaptive_softmax net_output = model(**sample['net_input']) orig_target = model.get_targets(sample, net_output) nsentences = orig_target.size(0) orig_target = orig_target.view(-1) bsz = orig_target.size(0) logits, target = adaptive_softmax(net_output[0], orig_target) assert len(target) == len(logits) loss = net_output[0].new(1 if reduce else bsz).zero_() for i in range(len(target)): if target[i] is not None: assert (target[i].min() >= 0 and target[i].max() <= logits[i].size(1)) loss += F.cross_entropy( logits[i], target[i], ignore_index=self.padding_idx, reduction='sum' if reduce else 'none', ) orig = utils.strip_pad(orig_target, self.padding_idx) ntokens = orig.numel() sample_size = sample['target'].size(0) if self.sentence_avg else ntokens logging_output = { 'loss': loss.data, 'ntokens': ntokens, 'nsentences': nsentences, 'sample_size': sample_size, } return loss, sample_size, logging_output @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = utils.item(sum(log.get('loss', 0) for log in logging_outputs)) ntokens = utils.item(sum(log.get('ntokens', 0) for log in logging_outputs)) sample_size = utils.item(sum(log.get('sample_size', 0) for log in logging_outputs)) metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3) if sample_size != ntokens: metrics.log_scalar('nll_loss', loss_sum / ntokens / math.log(2), ntokens, round=3) metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['nll_loss'].avg)) else: metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['loss'].avg)) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return True
3,981
38.039216
105
py
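An analogy sketch using PyTorch's built-in nn.AdaptiveLogSoftmaxWithLoss rather than fairseq's AdaptiveSoftmax, to show the cluster idea the criterion above relies on; the sizes are arbitrary:

import torch
import torch.nn as nn

asm = nn.AdaptiveLogSoftmaxWithLoss(in_features=16, n_classes=100, cutoffs=[10, 40])
features = torch.randn(8, 16)              # decoder states
targets = torch.randint(0, 100, (8,))      # word ids; frequent ids (< 10) live in the head
print(asm(features, targets).loss)         # NLL computed cluster by cluster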
RegularizedBN
RegularizedBN-main/fairseq/criterions/masked_lm.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq import metrics, modules, utils from fairseq.criterions import FairseqCriterion, register_criterion @register_criterion('masked_lm') class MaskedLmLoss(FairseqCriterion): """ Implementation for the loss used in masked language model (MLM) training. """ def __init__(self, task, tpu=False): super().__init__(task) self.tpu = tpu def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ masked_tokens = sample['target'].ne(self.padding_idx) sample_size = masked_tokens.int().sum() # Rare: when all tokens are masked, project all tokens. # We use torch.where to avoid device-to-host transfers, # except on CPU where torch.where is not well supported # (see github.com/pytorch/pytorch/issues/26247). if self.tpu: masked_tokens = None # always project all tokens on TPU elif masked_tokens.device == torch.device('cpu'): if not masked_tokens.any(): masked_tokens = None else: masked_tokens = torch.where( masked_tokens.any(), masked_tokens, masked_tokens.new([True]), ) logits = model(**sample['net_input'], masked_tokens=masked_tokens)[0] targets = model.get_targets(sample, [logits]) if masked_tokens is not None: targets = targets[masked_tokens] loss = modules.cross_entropy( logits.view(-1, logits.size(-1)), targets.view(-1), reduction='sum', ignore_index=self.padding_idx, ) logging_output = { 'loss': loss if self.tpu else loss.data, 'ntokens': sample['ntokens'], 'nsentences': sample['nsentences'], 'sample_size': sample_size, } return loss, sample_size, logging_output @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = sum(log.get('loss', 0) for log in logging_outputs) sample_size = sum(log.get('sample_size', 0) for log in logging_outputs) metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3) metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['loss'].avg)) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return True
3,159
34.505618
94
py
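A sketch of the torch.where fallback above: when no token is masked it swaps in an all-True mask while keeping the .any() result on device instead of branching on it in Python, so the LM head never receives an empty selection; the toy mask is invented:

import torch

masked_tokens = torch.zeros(2, 3, dtype=torch.bool)      # nothing masked in this batch
masked_tokens = torch.where(
    masked_tokens.any(), masked_tokens, masked_tokens.new([True]),
)
print(masked_tokens)   # every position selected, so the projection still gets valid rows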
RegularizedBN
RegularizedBN-main/fairseq/criterions/ctc.py
# All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. from argparse import Namespace import math import torch import torch.nn.functional as F from fairseq import metrics, utils from fairseq.data.data_utils import post_process from fairseq.criterions import FairseqCriterion, register_criterion from fairseq.logging.meters import safe_round @register_criterion("ctc") class CtcCriterion(FairseqCriterion): def __init__(self, task, wer_args, zero_infinity, sentence_avg, remove_bpe): super().__init__(task) self.blank_idx = task.target_dictionary.bos() self.pad_idx = task.target_dictionary.pad() self.eos_idx = task.target_dictionary.eos() self.post_process = remove_bpe if remove_bpe else "letter" if wer_args is not None: from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder wer_compute_kenlm, wer_lexicon, lm_w, ws_w = eval(wer_args) dec_args = Namespace() dec_args.nbest = 1 dec_args.criterion = "ctc" dec_args.kenlm_model = wer_compute_kenlm dec_args.lexicon = wer_lexicon dec_args.beam = 50 dec_args.beam_size_token = min(50, len(task.target_dictionary)) dec_args.beam_threshold = min(50, len(task.target_dictionary)) dec_args.lm_weight = lm_w dec_args.word_score = ws_w dec_args.unk_weight = -math.inf dec_args.sil_weight = 0 self.w2l_decoder = W2lKenLMDecoder(dec_args, task.target_dictionary) else: self.w2l_decoder = None self.zero_infinity = zero_infinity self.sentence_avg = sentence_avg @staticmethod def add_args(parser): """Add criterion-specific arguments to the parser.""" parser.add_argument( "--zero-infinity", action="store_true", help="zero inf loss" ) try: parser.add_argument( "--remove-bpe", "--post-process", default="letter", help="remove BPE tokens before scoring (can be set to sentencepiece, letter, and more)", ) except: pass # this option might have been added from eval args parser.add_argument( "--wer-args", type=str, default=None, help="options for wer computation on valid set using 4 gram lm. 
this should be a tuple of 4 elements: path to 4-gram lm, \ path to lexicon, lm score, word score", ) def forward(self, model, sample, reduce=True): net_output = model(**sample["net_input"]) lprobs = model.get_normalized_probs( net_output, log_probs=True ).contiguous() # (T, B, C) from the encoder if "src_lengths" in sample["net_input"]: input_lengths = sample["net_input"]["src_lengths"] else: non_padding_mask = ~net_output["padding_mask"] input_lengths = non_padding_mask.long().sum(-1) pad_mask = (sample["target"] != self.pad_idx) & ( sample["target"] != self.eos_idx ) targets_flat = sample["target"].masked_select(pad_mask) target_lengths = sample["target_lengths"] with torch.backends.cudnn.flags(enabled=False): loss = F.ctc_loss( lprobs, targets_flat, input_lengths, target_lengths, blank=self.blank_idx, reduction="sum", zero_infinity=self.zero_infinity, ) ntokens = ( sample["ntokens"] if "ntokens" in sample else target_lengths.sum().item() ) sample_size = sample["target"].size(0) if self.sentence_avg else ntokens logging_output = { "loss": utils.item(loss.data), # * sample['ntokens'], "ntokens": ntokens, "nsentences": sample["id"].numel(), "sample_size": sample_size, } if not model.training: import editdistance with torch.no_grad(): lprobs_t = lprobs.transpose(0, 1).float().cpu() c_err = 0 c_len = 0 w_errs = 0 w_len = 0 wv_errs = 0 for lp, t, inp_l in zip( lprobs_t, sample["target_label"] if "target_label" in sample else sample["target"], input_lengths, ): lp = lp[:inp_l].unsqueeze(0) decoded = None if self.w2l_decoder is not None: decoded = self.w2l_decoder.decode(lp) if len(decoded) < 1: decoded = None else: decoded = decoded[0] if len(decoded) < 1: decoded = None else: decoded = decoded[0] p = (t != self.task.target_dictionary.pad()) & ( t != self.task.target_dictionary.eos() ) targ = t[p] targ_units = self.task.target_dictionary.string(targ) targ_units_arr = targ.tolist() toks = lp.argmax(dim=-1).unique_consecutive() pred_units_arr = toks[toks != self.blank_idx].tolist() c_err += editdistance.eval(pred_units_arr, targ_units_arr) c_len += len(targ_units_arr) targ_words = post_process(targ_units, self.post_process).split() pred_units = self.task.target_dictionary.string(pred_units_arr) pred_words_raw = post_process(pred_units, self.post_process).split() if decoded is not None and "words" in decoded: pred_words = decoded["words"] w_errs += editdistance.eval(pred_words, targ_words) wv_errs += editdistance.eval(pred_words_raw, targ_words) else: dist = editdistance.eval(pred_words_raw, targ_words) w_errs += dist wv_errs += dist w_len += len(targ_words) logging_output["wv_errors"] = wv_errs logging_output["w_errors"] = w_errs logging_output["w_total"] = w_len logging_output["c_errors"] = c_err logging_output["c_total"] = c_len return loss, sample_size, logging_output @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs)) ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs)) nsentences = utils.item( sum(log.get("nsentences", 0) for log in logging_outputs) ) sample_size = utils.item( sum(log.get("sample_size", 0) for log in logging_outputs) ) metrics.log_scalar( "loss", loss_sum / sample_size / math.log(2), sample_size, round=3 ) metrics.log_scalar("ntokens", ntokens) metrics.log_scalar("nsentences", nsentences) if sample_size != ntokens: metrics.log_scalar( "nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3 ) c_errors = 
sum(log.get("c_errors", 0) for log in logging_outputs) metrics.log_scalar("_c_errors", c_errors) c_total = sum(log.get("c_total", 0) for log in logging_outputs) metrics.log_scalar("_c_total", c_total) w_errors = sum(log.get("w_errors", 0) for log in logging_outputs) metrics.log_scalar("_w_errors", w_errors) wv_errors = sum(log.get("wv_errors", 0) for log in logging_outputs) metrics.log_scalar("_wv_errors", wv_errors) w_total = sum(log.get("w_total", 0) for log in logging_outputs) metrics.log_scalar("_w_total", w_total) if c_total > 0: metrics.log_derived( "uer", lambda meters: safe_round(meters["_c_errors"].sum * 100.0 / meters["_c_total"].sum, 3) if meters["_c_total"].sum > 0 else float("nan"), ) if w_total > 0: metrics.log_derived( "wer", lambda meters: safe_round(meters["_w_errors"].sum * 100.0 / meters["_w_total"].sum, 3) if meters["_w_total"].sum > 0 else float("nan"), ) metrics.log_derived( "raw_wer", lambda meters: safe_round(meters["_wv_errors"].sum * 100.0 / meters["_w_total"].sum, 3) if meters["_w_total"].sum > 0 else float("nan"), ) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return True
9,640
37.875
134
py
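A minimal sketch of the F.ctc_loss call at the heart of the criterion above; here blank=0 stands in for the dictionary's bos index and every tensor is invented:

import torch
import torch.nn.functional as F

T, B, C = 12, 2, 5
lprobs = torch.randn(T, B, C).log_softmax(-1)    # (time, batch, vocab) log-probs
targets_flat = torch.randint(1, C, (7,))         # targets concatenated, no blanks
input_lengths = torch.tensor([12, 10])
target_lengths = torch.tensor([4, 3])            # 4 + 3 == 7 targets in total

loss = F.ctc_loss(lprobs, targets_flat, input_lengths, target_lengths,
                  blank=0, reduction='sum', zero_infinity=True)
print(loss)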
RegularizedBN
RegularizedBN-main/fairseq/criterions/cross_entropy.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion @register_criterion('cross_entropy') class CrossEntropyCriterion(FairseqCriterion): def __init__(self, task, sentence_avg): super().__init__(task) self.sentence_avg = sentence_avg def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ net_output = model(**sample['net_input']) loss, _ = self.compute_loss(model, net_output, sample, reduce=reduce) sample_size = sample['target'].size(0) if self.sentence_avg else sample['ntokens'] logging_output = { 'loss': loss.data, 'ntokens': sample['ntokens'], 'nsentences': sample['target'].size(0), 'sample_size': sample_size, } return loss, sample_size, logging_output def compute_loss(self, model, net_output, sample, reduce=True): lprobs = model.get_normalized_probs(net_output, log_probs=True) lprobs = lprobs.view(-1, lprobs.size(-1)) target = model.get_targets(sample, net_output).view(-1) loss = F.nll_loss( lprobs, target, ignore_index=self.padding_idx, reduction='sum' if reduce else 'none', ) return loss, loss @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = sum(log.get('loss', 0) for log in logging_outputs) ntokens = sum(log.get('ntokens', 0) for log in logging_outputs) sample_size = sum(log.get('sample_size', 0) for log in logging_outputs) metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3) if sample_size != ntokens: metrics.log_scalar('nll_loss', loss_sum / ntokens / math.log(2), ntokens, round=3) metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['nll_loss'].avg)) else: metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['loss'].avg)) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return True
2,871
37.810811
99
py
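A sketch of the logging arithmetic in reduce_metrics above: the summed natural-log NLL is divided by the sample size and by math.log(2), so 'loss' is reported in bits per token and perplexity is recovered as 2 ** loss; the totals are invented:

import math

loss_sum, ntokens = 693.1, 200                         # summed nll over the batch, in nats
loss_bits = loss_sum / ntokens / math.log(2)
print(round(loss_bits, 3), round(2 ** loss_bits, 2))   # ~5.0 bits/token, ppl ~32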
RegularizedBN
RegularizedBN-main/fairseq/criterions/sentence_prediction.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion @register_criterion('sentence_prediction') class SentencePredictionCriterion(FairseqCriterion): def __init__(self, task, classification_head_name, regression_target): super().__init__(task) self.classification_head_name = classification_head_name self.regression_target = regression_target @staticmethod def add_args(parser): # fmt: off parser.add_argument('--classification-head-name', default='sentence_classification_head', help='name of the classification head to use') # fmt: on def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ assert ( hasattr(model, 'classification_heads') and self.classification_head_name in model.classification_heads ), 'model must provide sentence classification head for --criterion=sentence_prediction' logits, _ = model( **sample['net_input'], features_only=True, classification_head_name=self.classification_head_name, ) targets = model.get_targets(sample, [logits]).view(-1) sample_size = targets.numel() if not self.regression_target: lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32) loss = F.nll_loss(lprobs, targets, reduction='sum') else: logits = logits.view(-1).float() targets = targets.float() loss = F.mse_loss(logits, targets, reduction='sum') logging_output = { 'loss': loss.data, 'ntokens': sample['ntokens'], 'nsentences': sample_size, 'sample_size': sample_size, } if not self.regression_target: preds = logits.argmax(dim=1) logging_output['ncorrect'] = (preds == targets).sum() return loss, sample_size, logging_output @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = sum(log.get('loss', 0) for log in logging_outputs) ntokens = sum(log.get('ntokens', 0) for log in logging_outputs) nsentences = sum(log.get('nsentences', 0) for log in logging_outputs) sample_size = sum(log.get('sample_size', 0) for log in logging_outputs) metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3) if sample_size != ntokens: metrics.log_scalar('nll_loss', loss_sum / ntokens / math.log(2), ntokens, round=3) if len(logging_outputs) > 0 and 'ncorrect' in logging_outputs[0]: ncorrect = sum(log.get('ncorrect', 0) for log in logging_outputs) metrics.log_scalar('accuracy', 100.0 * ncorrect / nsentences, nsentences, round=1) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return True
3,717
37.729167
96
py
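A sketch of the two branches above: classification targets go through log_softmax + nll_loss, while the regression_target option switches to a summed MSE on a single score; all tensors are invented:

import torch
import torch.nn.functional as F

logits = torch.randn(4, 3)                        # 4 sentences, 3 classes
targets = torch.tensor([0, 2, 1, 2])
cls_loss = F.nll_loss(F.log_softmax(logits, dim=-1, dtype=torch.float32),
                      targets, reduction='sum')

scores = torch.randn(4, 1).view(-1).float()       # regression head: one score per sentence
reg_loss = F.mse_loss(scores, torch.tensor([1.0, 3.5, 2.0, 4.0]), reduction='sum')
print(cls_loss.item(), reg_loss.item())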
RegularizedBN
RegularizedBN-main/fairseq/criterions/sentence_ranking.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion @register_criterion('sentence_ranking') class SentenceRankingCriterion(FairseqCriterion): def __init__(self, task, ranking_head_name, save_predictions, num_classes): super().__init__(task) self.ranking_head_name = ranking_head_name if save_predictions is not None: self.prediction_h = open(save_predictions, 'w') else: self.prediction_h = None self.num_classes = num_classes def __del__(self): if self.prediction_h is not None: self.prediction_h.close() @staticmethod def add_args(parser): # fmt: off parser.add_argument('--save-predictions', metavar='FILE', help='file to save predictions to') parser.add_argument('--ranking-head-name', default='sentence_classification_head', help='name of the ranking head to use') # fmt: on def forward(self, model, sample, reduce=True): """Compute ranking loss for the given sample. Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ assert ( hasattr(model, 'classification_heads') and self.ranking_head_name in model.classification_heads ), 'model must provide sentence ranking head for --criterion=sentence_ranking' scores = [] for idx in range(self.num_classes): score, _ = model( **sample['net_input{idx}'.format(idx=idx+1)], classification_head_name=self.ranking_head_name, ) scores.append(score) logits = torch.cat(scores, dim=1) sample_size = logits.size(0) if 'target' in sample: targets = model.get_targets(sample, [logits]).view(-1) lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32) loss = F.nll_loss(lprobs, targets, reduction='sum') else: targets = None loss = torch.tensor(0.0, requires_grad=True) if self.prediction_h is not None: preds = logits.argmax(dim=1) for i, (id, pred) in enumerate(zip(sample['id'].tolist(), preds.tolist())): if targets is not None: label = targets[i].item() print('{}\t{}\t{}'.format(id, pred, label), file=self.prediction_h) else: print('{}\t{}'.format(id, pred), file=self.prediction_h) logging_output = { 'loss': loss.data, 'ntokens': sample['ntokens'], 'nsentences': sample_size, 'sample_size': sample_size, } if targets is not None: logging_output['ncorrect'] = (logits.argmax(dim=1) == targets).sum() return loss, sample_size, logging_output @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = sum(log.get('loss', 0) for log in logging_outputs) ntokens = sum(log.get('ntokens', 0) for log in logging_outputs) nsentences = sum(log.get('nsentences', 0) for log in logging_outputs) sample_size = sum(log.get('sample_size', 0) for log in logging_outputs) metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3) if sample_size != ntokens: metrics.log_scalar('nll_loss', loss_sum / ntokens / math.log(2), ntokens, round=3) if len(logging_outputs) > 0 and 'ncorrect' in logging_outputs[0]: ncorrect = sum(log.get('ncorrect', 0) for log in logging_outputs) metrics.log_scalar('accuracy', 100.0 * ncorrect / nsentences, nsentences, round=1) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling 
`reduce_metrics`. Setting this to True will improve distributed training speed. """ return True
4,532
37.74359
94
py
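A sketch of the ranking setup above: the head is called once per candidate, the per-candidate scores are concatenated into (batch, num_classes) logits, and the target is the index of the correct candidate; everything here is invented:

import torch
import torch.nn.functional as F

num_classes, bsz = 4, 3
scores = [torch.randn(bsz, 1) for _ in range(num_classes)]   # one head call per candidate
logits = torch.cat(scores, dim=1)                            # (bsz, num_classes)
targets = torch.tensor([2, 0, 3])                            # index of the true candidate
loss = F.nll_loss(F.log_softmax(logits, dim=-1, dtype=torch.float32),
                  targets, reduction='sum')
print(loss.item())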
RegularizedBN
RegularizedBN-main/fairseq/criterions/composite_loss.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from torch import nn from fairseq import utils from fairseq.criterions import FairseqCriterion, register_criterion @register_criterion('composite_loss') class CompositeLoss(FairseqCriterion): """This is a composite loss that, given a list of model outputs and a list of targets, computes an average of losses for each output-target pair""" def __init__(self, task, underlying_criterion): super().__init__(task) self.underlying_criterion = underlying_criterion @staticmethod def add_args(parser): """Add criterion-specific arguments to the parser.""" # fmt: off parser.add_argument('--underlying-criterion', type=str, metavar='VAL', required=True, help='underlying criterion to use for the composite loss') # fmt: on @staticmethod def build_underlying_criterion(args, task): saved_criterion = args.criterion args.criterion = args.underlying_criterion assert saved_criterion != args.underlying_criterion underlying_criterion = task.build_criterion(args) args.criterion = saved_criterion return underlying_criterion @classmethod def build_criterion(cls, args, task): underlying_criterion = CompositeLoss.build_underlying_criterion(args, task) class FakeModel(nn.Module): def __init__(self, model, net_out, target): super().__init__() self.model = model self.net_out = net_out self.target = target def forward(self, **unused): return self.net_out def get_normalized_probs(self, net_output, log_probs, sample=None): return self.model.get_normalized_probs(net_output, log_probs, sample=sample) def get_targets(self, *unused): return self.target @property def decoder(self): return self.model.decoder class _CompositeLoss(FairseqCriterion): def __init__(self, task, underlying_criterion): super().__init__(task) self.underlying_criterion = underlying_criterion def forward(self, model, sample, reduce=True): net_outputs = model(**sample['net_input']) targets = sample['target'] bsz = targets[0].size(0) loss = net_outputs[0][0].new(1 if reduce else bsz).float().zero_() sample_size = 0 logging_output = {} for o, t in zip(net_outputs[0], targets): m = FakeModel(model, (o, net_outputs[1]), t) sample['target'] = t l, ss, logging_output = self.underlying_criterion(m, sample, reduce) loss += l sample_size += ss loss.div_(len(targets)) sample_size /= len(targets) logging_output['loss'] = utils.item(loss.data) if reduce else loss.data return loss, sample_size, logging_output @staticmethod def aggregate_logging_outputs(logging_outputs): return underlying_criterion.__class__.aggregate_logging_outputs(logging_outputs) @staticmethod def reduce_metrics(logging_outputs) -> None: underlying_criterion.__class__.reduce_metrics(logging_outputs) return _CompositeLoss(task, underlying_criterion)
3,689
35.9
96
py
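A worked sketch of the averaging done by the composite loss above (numbers invented): each output/target pair is scored by the underlying criterion, then both the losses and the sample sizes are averaged over the pairs:

pair_losses = [2.4, 2.0, 1.6]     # one loss per output/target pair from the underlying criterion
pair_sizes = [32, 32, 32]

loss = sum(pair_losses) / len(pair_losses)
sample_size = sum(pair_sizes) / len(pair_sizes)
print(loss, sample_size)          # 2.0 32.0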
RegularizedBN
RegularizedBN-main/fairseq/models/lstm.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import AdaptiveSoftmax, FairseqDropout from torch import Tensor from typing import Dict, List, Optional, Tuple DEFAULT_MAX_SOURCE_POSITIONS = 1e5 DEFAULT_MAX_TARGET_POSITIONS = 1e5 @register_model('lstm') class LSTMModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-freeze-embed', action='store_true', help='freeze encoder embeddings') parser.add_argument('--encoder-hidden-size', type=int, metavar='N', help='encoder hidden size') parser.add_argument('--encoder-layers', type=int, metavar='N', help='number of encoder layers') parser.add_argument('--encoder-bidirectional', action='store_true', help='make all layers of encoder bidirectional') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-freeze-embed', action='store_true', help='freeze decoder embeddings') parser.add_argument('--decoder-hidden-size', type=int, metavar='N', help='decoder hidden size') parser.add_argument('--decoder-layers', type=int, metavar='N', help='number of decoder layers') parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension') parser.add_argument('--decoder-attention', type=str, metavar='BOOL', help='decoder attention') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion') parser.add_argument('--share-decoder-input-output-embed', default=False, action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', default=False, action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') # Granular dropout settings (if not specified these default to --dropout) parser.add_argument('--encoder-dropout-in', type=float, metavar='D', help='dropout probability for encoder input embedding') parser.add_argument('--encoder-dropout-out', type=float, metavar='D', help='dropout probability for encoder output') parser.add_argument('--decoder-dropout-in', type=float, metavar='D', help='dropout probability for decoder input embedding') parser.add_argument('--decoder-dropout-out', type=float, metavar='D', help='dropout probability for decoder output') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure that all args are properly defaulted (in case there are any new ones) base_architecture(args) if args.encoder_layers != args.decoder_layers: raise ValueError('--encoder-layers must match --decoder-layers') max_source_positions = getattr(args, 'max_source_positions', DEFAULT_MAX_SOURCE_POSITIONS) max_target_positions = getattr(args, 'max_target_positions', DEFAULT_MAX_TARGET_POSITIONS) def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) embed_dict = utils.parse_embedding(embed_path) utils.print_embed_overlap(embed_dict, dictionary) return utils.load_embedding(embed_dict, dictionary, embed_tokens) if args.encoder_embed_path: pretrained_encoder_embed = load_pretrained_embedding_from_file( args.encoder_embed_path, task.source_dictionary, args.encoder_embed_dim) else: num_embeddings = len(task.source_dictionary) pretrained_encoder_embed = Embedding( num_embeddings, args.encoder_embed_dim, task.source_dictionary.pad() ) if args.share_all_embeddings: # double check all parameters combinations are valid if task.source_dictionary != task.target_dictionary: raise ValueError('--share-all-embeddings requires a joint dictionary') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError( '--share-all-embed not compatible with --decoder-embed-path' ) if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to ' 'match --decoder-embed-dim' ) pretrained_decoder_embed = pretrained_encoder_embed args.share_decoder_input_output_embed = True else: # separate decoder input embeddings pretrained_decoder_embed = None if args.decoder_embed_path: pretrained_decoder_embed = load_pretrained_embedding_from_file( args.decoder_embed_path, task.target_dictionary, args.decoder_embed_dim ) # one last double check of parameter combinations if args.share_decoder_input_output_embed and ( args.decoder_embed_dim != args.decoder_out_embed_dim): raise ValueError( '--share-decoder-input-output-embeddings requires ' '--decoder-embed-dim to match --decoder-out-embed-dim' ) if args.encoder_freeze_embed: pretrained_encoder_embed.weight.requires_grad = False if args.decoder_freeze_embed: pretrained_decoder_embed.weight.requires_grad = False encoder = LSTMEncoder( dictionary=task.source_dictionary, embed_dim=args.encoder_embed_dim, 
hidden_size=args.encoder_hidden_size, num_layers=args.encoder_layers, dropout_in=args.encoder_dropout_in, dropout_out=args.encoder_dropout_out, bidirectional=args.encoder_bidirectional, pretrained_embed=pretrained_encoder_embed, max_source_positions=max_source_positions, ) decoder = LSTMDecoder( dictionary=task.target_dictionary, embed_dim=args.decoder_embed_dim, hidden_size=args.decoder_hidden_size, out_embed_dim=args.decoder_out_embed_dim, num_layers=args.decoder_layers, dropout_in=args.decoder_dropout_in, dropout_out=args.decoder_dropout_out, attention=options.eval_bool(args.decoder_attention), encoder_output_units=encoder.output_units, pretrained_embed=pretrained_decoder_embed, share_input_output_embed=args.share_decoder_input_output_embed, adaptive_softmax_cutoff=( options.eval_str_list(args.adaptive_softmax_cutoff, type=int) if args.criterion == 'adaptive_loss' else None ), max_target_positions=max_target_positions, residuals=False, ) return cls(encoder, decoder) def forward( self, src_tokens, src_lengths, prev_output_tokens, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, ): encoder_out = self.encoder(src_tokens, src_lengths=src_lengths) decoder_out = self.decoder( prev_output_tokens, encoder_out=encoder_out, incremental_state=incremental_state ) return decoder_out class LSTMEncoder(FairseqEncoder): """LSTM encoder.""" def __init__( self, dictionary, embed_dim=512, hidden_size=512, num_layers=1, dropout_in=0.1, dropout_out=0.1, bidirectional=False, left_pad=True, pretrained_embed=None, padding_idx=None, max_source_positions=DEFAULT_MAX_SOURCE_POSITIONS, ): super().__init__(dictionary) self.num_layers = num_layers self.dropout_in_module = FairseqDropout(dropout_in, module_name=self.__class__.__name__) self.dropout_out_module = FairseqDropout(dropout_out, module_name=self.__class__.__name__) self.bidirectional = bidirectional self.hidden_size = hidden_size self.max_source_positions = max_source_positions num_embeddings = len(dictionary) self.padding_idx = padding_idx if padding_idx is not None else dictionary.pad() if pretrained_embed is None: self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx) else: self.embed_tokens = pretrained_embed self.lstm = LSTM( input_size=embed_dim, hidden_size=hidden_size, num_layers=num_layers, dropout=self.dropout_out_module.p if num_layers > 1 else 0., bidirectional=bidirectional, ) self.left_pad = left_pad self.output_units = hidden_size if bidirectional: self.output_units *= 2 def forward( self, src_tokens: Tensor, src_lengths: Tensor, enforce_sorted: bool = True, ): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): lengths of each source sentence of shape `(batch)` enforce_sorted (bool, optional): if True, `src_tokens` is expected to contain sequences sorted by length in a decreasing order. If False, this condition is not required. Default: True. 
""" if self.left_pad: # nn.utils.rnn.pack_padded_sequence requires right-padding; # convert left-padding to right-padding src_tokens = utils.convert_padding_direction( src_tokens, torch.zeros_like(src_tokens).fill_(self.padding_idx), left_to_right=True, ) bsz, seqlen = src_tokens.size() # embed tokens x = self.embed_tokens(src_tokens) x = self.dropout_in_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) # pack embedded source tokens into a PackedSequence packed_x = nn.utils.rnn.pack_padded_sequence( x, src_lengths.data, enforce_sorted=enforce_sorted ) # apply LSTM if self.bidirectional: state_size = 2 * self.num_layers, bsz, self.hidden_size else: state_size = self.num_layers, bsz, self.hidden_size h0 = x.new_zeros(*state_size) c0 = x.new_zeros(*state_size) packed_outs, (final_hiddens, final_cells) = self.lstm(packed_x, (h0, c0)) # unpack outputs and apply dropout x, _ = nn.utils.rnn.pad_packed_sequence(packed_outs, padding_value=self.padding_idx*1.0) x = self.dropout_out_module(x) assert list(x.size()) == [seqlen, bsz, self.output_units] if self.bidirectional: final_hiddens = self.combine_bidir(final_hiddens, bsz) final_cells = self.combine_bidir(final_cells, bsz) encoder_padding_mask = src_tokens.eq(self.padding_idx).t() return tuple(( x, # seq_len x batch x hidden final_hiddens, # num_layers x batch x num_directions*hidden final_cells, # num_layers x batch x num_directions*hidden encoder_padding_mask, # seq_len x batch )) def combine_bidir(self, outs, bsz: int): out = outs.view(self.num_layers, 2, bsz, -1).transpose(1, 2).contiguous() return out.view(self.num_layers, bsz, -1) def reorder_encoder_out(self, encoder_out, new_order): return tuple(( encoder_out[0].index_select(1, new_order), encoder_out[1].index_select(1, new_order), encoder_out[2].index_select(1, new_order), encoder_out[3].index_select(1, new_order), )) def max_positions(self): """Maximum input length supported by the encoder.""" return self.max_source_positions class AttentionLayer(nn.Module): def __init__(self, input_embed_dim, source_embed_dim, output_embed_dim, bias=False): super().__init__() self.input_proj = Linear(input_embed_dim, source_embed_dim, bias=bias) self.output_proj = Linear(input_embed_dim + source_embed_dim, output_embed_dim, bias=bias) def forward(self, input, source_hids, encoder_padding_mask): # input: bsz x input_embed_dim # source_hids: srclen x bsz x source_embed_dim # x: bsz x source_embed_dim x = self.input_proj(input) # compute attention attn_scores = (source_hids * x.unsqueeze(0)).sum(dim=2) # don't attend over padding if encoder_padding_mask is not None: attn_scores = attn_scores.float().masked_fill_( encoder_padding_mask, float('-inf') ).type_as(attn_scores) # FP16 support: cast to float and back attn_scores = F.softmax(attn_scores, dim=0) # srclen x bsz # sum weighted sources x = (attn_scores.unsqueeze(2) * source_hids).sum(dim=0) x = torch.tanh(self.output_proj(torch.cat((x, input), dim=1))) return x, attn_scores class LSTMDecoder(FairseqIncrementalDecoder): """LSTM decoder.""" def __init__( self, dictionary, embed_dim=512, hidden_size=512, out_embed_dim=512, num_layers=1, dropout_in=0.1, dropout_out=0.1, attention=True, encoder_output_units=512, pretrained_embed=None, share_input_output_embed=False, adaptive_softmax_cutoff=None, max_target_positions=DEFAULT_MAX_TARGET_POSITIONS, residuals=False, ): super().__init__(dictionary) self.dropout_in_module = FairseqDropout(dropout_in, module_name=self.__class__.__name__) self.dropout_out_module = FairseqDropout(dropout_out, 
module_name=self.__class__.__name__) self.hidden_size = hidden_size self.share_input_output_embed = share_input_output_embed self.need_attn = True self.max_target_positions = max_target_positions self.residuals = residuals self.num_layers = num_layers self.adaptive_softmax = None num_embeddings = len(dictionary) padding_idx = dictionary.pad() if pretrained_embed is None: self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) else: self.embed_tokens = pretrained_embed self.encoder_output_units = encoder_output_units if encoder_output_units != hidden_size and encoder_output_units != 0: self.encoder_hidden_proj = Linear(encoder_output_units, hidden_size) self.encoder_cell_proj = Linear(encoder_output_units, hidden_size) else: self.encoder_hidden_proj = self.encoder_cell_proj = None # disable input feeding if there is no encoder # input feeding is described in arxiv.org/abs/1508.04025 input_feed_size = 0 if encoder_output_units == 0 else hidden_size self.layers = nn.ModuleList([ LSTMCell( input_size=input_feed_size + embed_dim if layer == 0 else hidden_size, hidden_size=hidden_size, ) for layer in range(num_layers) ]) if attention: # TODO make bias configurable self.attention = AttentionLayer(hidden_size, encoder_output_units, hidden_size, bias=False) else: self.attention = None if hidden_size != out_embed_dim: self.additional_fc = Linear(hidden_size, out_embed_dim) if adaptive_softmax_cutoff is not None: # setting adaptive_softmax dropout to dropout_out for now but can be redefined self.adaptive_softmax = AdaptiveSoftmax( num_embeddings, hidden_size, adaptive_softmax_cutoff, dropout=dropout_out, ) elif not self.share_input_output_embed: self.fc_out = Linear(out_embed_dim, num_embeddings, dropout=dropout_out) def forward( self, prev_output_tokens, encoder_out: Optional[Tuple[Tensor, Tensor, Tensor, Tensor]] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, src_lengths: Optional[Tensor] = None, ): x, attn_scores = self.extract_features( prev_output_tokens, encoder_out, incremental_state ) return self.output_layer(x), attn_scores def extract_features( self, prev_output_tokens, encoder_out: Optional[Tuple[Tensor, Tensor, Tensor, Tensor]] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, ): """ Similar to *forward* but only return features. 
""" # get outputs from encoder if encoder_out is not None: encoder_outs = encoder_out[0] encoder_hiddens = encoder_out[1] encoder_cells = encoder_out[2] encoder_padding_mask = encoder_out[3] else: encoder_outs = torch.empty(0) encoder_hiddens = torch.empty(0) encoder_cells = torch.empty(0) encoder_padding_mask = torch.empty(0) srclen = encoder_outs.size(0) if incremental_state is not None and len(incremental_state) > 0: prev_output_tokens = prev_output_tokens[:, -1:] bsz, seqlen = prev_output_tokens.size() # embed tokens x = self.embed_tokens(prev_output_tokens) x = self.dropout_in_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) # initialize previous states (or get from cache during incremental generation) if incremental_state is not None and len(incremental_state) > 0: prev_hiddens, prev_cells, input_feed = self.get_cached_state(incremental_state) elif encoder_out is not None: # setup recurrent cells prev_hiddens = [encoder_hiddens[i] for i in range(self.num_layers)] prev_cells = [encoder_cells[i] for i in range(self.num_layers)] if self.encoder_hidden_proj is not None: prev_hiddens = [self.encoder_hidden_proj(y) for y in prev_hiddens] prev_cells = [self.encoder_cell_proj(y) for y in prev_cells] input_feed = x.new_zeros(bsz, self.hidden_size) else: # setup zero cells, since there is no encoder zero_state = x.new_zeros(bsz, self.hidden_size) prev_hiddens = [zero_state for i in range(self.num_layers)] prev_cells = [zero_state for i in range(self.num_layers)] input_feed = None assert srclen > 0 or self.attention is None, \ "attention is not supported if there are no encoder outputs" attn_scores = x.new_zeros(srclen, seqlen, bsz) if self.attention is not None else None outs = [] for j in range(seqlen): # input feeding: concatenate context vector from previous time step if input_feed is not None: input = torch.cat((x[j, :, :], input_feed), dim=1) else: input = x[j] for i, rnn in enumerate(self.layers): # recurrent cell hidden, cell = rnn(input, (prev_hiddens[i], prev_cells[i])) # hidden state becomes the input to the next layer input = self.dropout_out_module(hidden) if self.residuals: input = input + prev_hiddens[i] # save state for next time step prev_hiddens[i] = hidden prev_cells[i] = cell # apply attention using the last layer's hidden state if self.attention is not None: assert attn_scores is not None out, attn_scores[:, j, :] = self.attention(hidden, encoder_outs, encoder_padding_mask) else: out = hidden out = self.dropout_out_module(out) # input feeding if input_feed is not None: input_feed = out # save final output outs.append(out) # Stack all the necessary tensors together and store prev_hiddens_tensor = torch.stack(prev_hiddens) prev_cells_tensor = torch.stack(prev_cells) cache_state = torch.jit.annotate( Dict[str, Optional[Tensor]], { "prev_hiddens": prev_hiddens_tensor, "prev_cells": prev_cells_tensor, "input_feed": input_feed, } ) self.set_incremental_state(incremental_state, 'cached_state', cache_state) # collect outputs across time steps x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size) # T x B x C -> B x T x C x = x.transpose(1, 0) if hasattr(self, 'additional_fc') and self.adaptive_softmax is None: x = self.additional_fc(x) x = self.dropout_out_module(x) # srclen x tgtlen x bsz -> bsz x tgtlen x srclen if not self.training and self.need_attn and self.attention is not None: assert attn_scores is not None attn_scores = attn_scores.transpose(0, 2) else: attn_scores = None return x, attn_scores def output_layer(self, x): """Project features to the vocabulary 
size.""" if self.adaptive_softmax is None: if self.share_input_output_embed: x = F.linear(x, self.embed_tokens.weight) else: x = self.fc_out(x) return x def get_cached_state( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], ) -> Tuple[List[Tensor], List[Tensor], Optional[Tensor]]: cached_state = self.get_incremental_state(incremental_state, 'cached_state') assert cached_state is not None prev_hiddens_ = cached_state["prev_hiddens"] assert prev_hiddens_ is not None prev_cells_ = cached_state["prev_cells"] assert prev_cells_ is not None prev_hiddens = [prev_hiddens_[i] for i in range(self.num_layers)] prev_cells = [prev_cells_[j] for j in range(self.num_layers)] input_feed = cached_state["input_feed"] # can be None for decoder-only language models return prev_hiddens, prev_cells, input_feed def reorder_incremental_state( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor, ): if incremental_state is None or len(incremental_state) == 0: return prev_hiddens, prev_cells, input_feed = self.get_cached_state(incremental_state) prev_hiddens = [p.index_select(0, new_order) for p in prev_hiddens] prev_cells = [p.index_select(0, new_order) for p in prev_cells] if input_feed is not None: input_feed = input_feed.index_select(0, new_order) cached_state_new = torch.jit.annotate( Dict[str, Optional[Tensor]], { "prev_hiddens": torch.stack(prev_hiddens), "prev_cells": torch.stack(prev_cells), "input_feed": input_feed, } ) self.set_incremental_state(incremental_state, 'cached_state', cached_state_new), return def max_positions(self): """Maximum output length supported by the decoder.""" return self.max_target_positions def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.uniform_(m.weight, -0.1, 0.1) nn.init.constant_(m.weight[padding_idx], 0) return m def LSTM(input_size, hidden_size, **kwargs): m = nn.LSTM(input_size, hidden_size, **kwargs) for name, param in m.named_parameters(): if 'weight' in name or 'bias' in name: param.data.uniform_(-0.1, 0.1) return m def LSTMCell(input_size, hidden_size, **kwargs): m = nn.LSTMCell(input_size, hidden_size, **kwargs) for name, param in m.named_parameters(): if 'weight' in name or 'bias' in name: param.data.uniform_(-0.1, 0.1) return m def Linear(in_features, out_features, bias=True, dropout=0.): """Linear layer (input: N x T x C)""" m = nn.Linear(in_features, out_features, bias=bias) m.weight.data.uniform_(-0.1, 0.1) if bias: m.bias.data.uniform_(-0.1, 0.1) return m @register_model_architecture('lstm', 'lstm') def base_architecture(args): args.dropout = getattr(args, 'dropout', 0.1) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_freeze_embed = getattr(args, 'encoder_freeze_embed', False) args.encoder_hidden_size = getattr(args, 'encoder_hidden_size', args.encoder_embed_dim) args.encoder_layers = getattr(args, 'encoder_layers', 1) args.encoder_bidirectional = getattr(args, 'encoder_bidirectional', False) args.encoder_dropout_in = getattr(args, 'encoder_dropout_in', args.dropout) args.encoder_dropout_out = getattr(args, 'encoder_dropout_out', args.dropout) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_freeze_embed = getattr(args, 'decoder_freeze_embed', 
False) args.decoder_hidden_size = getattr(args, 'decoder_hidden_size', args.decoder_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 1) args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512) args.decoder_attention = getattr(args, 'decoder_attention', '1') args.decoder_dropout_in = getattr(args, 'decoder_dropout_in', args.dropout) args.decoder_dropout_out = getattr(args, 'decoder_dropout_out', args.dropout) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', '10000,50000,200000') @register_model_architecture('lstm', 'lstm_wiseman_iwslt_de_en') def lstm_wiseman_iwslt_de_en(args): args.dropout = getattr(args, 'dropout', 0.1) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256) args.encoder_dropout_in = getattr(args, 'encoder_dropout_in', 0) args.encoder_dropout_out = getattr(args, 'encoder_dropout_out', 0) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 256) args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256) args.decoder_dropout_in = getattr(args, 'decoder_dropout_in', 0) args.decoder_dropout_out = getattr(args, 'decoder_dropout_out', args.dropout) base_architecture(args) @register_model_architecture('lstm', 'lstm_luong_wmt_en_de') def lstm_luong_wmt_en_de(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1000) args.encoder_layers = getattr(args, 'encoder_layers', 4) args.encoder_dropout_out = getattr(args, 'encoder_dropout_out', 0) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1000) args.decoder_layers = getattr(args, 'decoder_layers', 4) args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 1000) args.decoder_dropout_out = getattr(args, 'decoder_dropout_out', 0) base_architecture(args)
29,637
42.457478
103
py
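A minimal, self-contained sketch (dummy shapes, not the fairseq module itself) of the dot-product attention used by AttentionLayer in the lstm.py source above, with the input/output projections omitted: scores are computed against every encoder state, padding positions are masked to -inf before the softmax over source positions, and the context is the attention-weighted sum.

import torch
import torch.nn.functional as F

srclen, bsz, dim = 7, 2, 4
source_hids = torch.randn(srclen, bsz, dim)        # encoder outputs: srclen x bsz x dim
query = torch.randn(bsz, dim)                      # current decoder hidden state: bsz x dim
padding_mask = torch.zeros(srclen, bsz, dtype=torch.bool)
padding_mask[5:, 0] = True                         # pretend sentence 0 has only 5 real tokens

scores = (source_hids * query.unsqueeze(0)).sum(dim=2)       # srclen x bsz
scores = scores.masked_fill(padding_mask, float('-inf'))     # never attend to padding
attn = F.softmax(scores, dim=0)                              # normalize over source positions
context = (attn.unsqueeze(2) * source_hids).sum(dim=0)       # bsz x dim weighted sum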
RegularizedBN
RegularizedBN-main/fairseq/models/masked_lm.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoderModel, FairseqEncoder, register_model, register_model_architecture, ) from fairseq.modules import ( LayerNorm, SinusoidalPositionalEmbedding, TransformerSentenceEncoder, ) from fairseq.modules.transformer_sentence_encoder import init_bert_params logger = logging.getLogger(__name__) @register_model('masked_lm') class MaskedLMModel(FairseqEncoderModel): """ Class for training a Masked Language Model. It also supports an additional sentence level prediction if the sent-loss argument is set. """ def __init__(self, args, encoder): super().__init__(encoder) self.args = args # if specified then apply bert initialization on the model. We need # to explictly call this to make sure that the output embeddings # and projection layers are also correctly initialized if getattr(args, 'apply_bert_init', False): self.apply(init_bert_params) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # Arguments related to dropout parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for' ' attention weights') parser.add_argument('--act-dropout', type=float, metavar='D', help='dropout probability after' ' activation in FFN') # Arguments related to hidden states and self-attention parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') # Arguments related to input and output embeddings parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--share-encoder-input-output-embed', action='store_true', help='share encoder input' ' and output embeddings') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--no-token-positional-embeddings', action='store_true', help='if set, disables positional embeddings' ' (outside self attention)') parser.add_argument('--num-segment', type=int, metavar='N', help='num segment in the input') parser.add_argument('--max-positions', type=int, help='number of positional embeddings to learn') # Arguments related to sentence level prediction parser.add_argument('--sentence-class-num', type=int, metavar='N', help='number of classes for sentence task') parser.add_argument('--sent-loss', action='store_true', help='if set,' ' calculate sentence level predictions') # Arguments related to parameter initialization parser.add_argument('--apply-bert-init', action='store_true', help='use custom param initialization for BERT') # misc params parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--pooler-activation-fn', choices=utils.get_available_activation_fns(), help='Which activation function to use for pooler layer.') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder 
block') def forward(self, src_tokens, segment_labels=None, **kwargs): return self.encoder(src_tokens, segment_labels=segment_labels, **kwargs) def max_positions(self): return self.encoder.max_positions @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_positions'): args.max_positions = args.tokens_per_sample logger.info(args) encoder = MaskedLMEncoder(args, task.dictionary) return cls(args, encoder) class MaskedLMEncoder(FairseqEncoder): """ Encoder for Masked Language Modelling. """ def __init__(self, args, dictionary): super().__init__(dictionary) self.padding_idx = dictionary.pad() self.vocab_size = dictionary.__len__() self.max_positions = args.max_positions self.sentence_encoder = TransformerSentenceEncoder( padding_idx=self.padding_idx, vocab_size=self.vocab_size, num_encoder_layers=args.encoder_layers, embedding_dim=args.encoder_embed_dim, ffn_embedding_dim=args.encoder_ffn_embed_dim, num_attention_heads=args.encoder_attention_heads, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.act_dropout, max_seq_len=self.max_positions, num_segments=args.num_segment, use_position_embeddings=not args.no_token_positional_embeddings, encoder_normalize_before=args.encoder_normalize_before, apply_bert_init=args.apply_bert_init, activation_fn=args.activation_fn, learned_pos_embedding=args.encoder_learned_pos, ) self.share_input_output_embed = args.share_encoder_input_output_embed self.embed_out = None self.sentence_projection_layer = None self.sentence_out_dim = args.sentence_class_num self.lm_output_learned_bias = None # Remove head is set to true during fine-tuning self.load_softmax = not getattr(args, 'remove_head', False) self.masked_lm_pooler = nn.Linear( args.encoder_embed_dim, args.encoder_embed_dim ) self.pooler_activation = utils.get_activation_fn(args.pooler_activation_fn) self.lm_head_transform_weight = nn.Linear(args.encoder_embed_dim, args.encoder_embed_dim) self.activation_fn = utils.get_activation_fn(args.activation_fn) self.layer_norm = LayerNorm(args.encoder_embed_dim) self.lm_output_learned_bias = None if self.load_softmax: self.lm_output_learned_bias = nn.Parameter(torch.zeros(self.vocab_size)) if not self.share_input_output_embed: self.embed_out = nn.Linear( args.encoder_embed_dim, self.vocab_size, bias=False ) if args.sent_loss: self.sentence_projection_layer = nn.Linear( args.encoder_embed_dim, self.sentence_out_dim, bias=False ) def forward(self, src_tokens, segment_labels=None, masked_tokens=None, **unused): """ Forward pass for Masked LM encoder. This first computes the token embedding using the token embedding matrix, position embeddings (if specified) and segment embeddings (if specified). Here we assume that the sentence representation corresponds to the output of the classification_token (see bert_task or cross_lingual_lm task for more details). Args: - src_tokens: B x T matrix representing sentences - segment_labels: B x T matrix representing segment label for tokens Returns: - a tuple of the following: - logits for predictions in format B x T x C to be used in softmax afterwards - a dictionary of additional data, where 'pooled_output' contains the representation for classification_token and 'inner_states' is a list of internal model states used to compute the predictions (similar in ELMO). 
'sentence_logits' is the prediction logit for NSP task and is only computed if this is specified in the input arguments. """ inner_states, sentence_rep = self.sentence_encoder( src_tokens, segment_labels=segment_labels, ) x = inner_states[-1].transpose(0, 1) # project masked tokens only if masked_tokens is not None: x = x[masked_tokens, :] x = self.layer_norm(self.activation_fn(self.lm_head_transform_weight(x))) pooled_output = self.pooler_activation(self.masked_lm_pooler(sentence_rep)) # project back to size of vocabulary if self.share_input_output_embed \ and hasattr(self.sentence_encoder.embed_tokens, 'weight'): x = F.linear(x, self.sentence_encoder.embed_tokens.weight) elif self.embed_out is not None: x = self.embed_out(x) if self.lm_output_learned_bias is not None: x = x + self.lm_output_learned_bias sentence_logits = None if self.sentence_projection_layer: sentence_logits = self.sentence_projection_layer(pooled_output) return x, { 'inner_states': inner_states, 'pooled_output': pooled_output, 'sentence_logits': sentence_logits } def max_positions(self): """Maximum output length supported by the encoder.""" return self.max_positions def upgrade_state_dict_named(self, state_dict, name): if isinstance( self.sentence_encoder.embed_positions, SinusoidalPositionalEmbedding ): state_dict[ name + '.sentence_encoder.embed_positions._float_tensor' ] = torch.FloatTensor(1) if not self.load_softmax: for k in list(state_dict.keys()): if ( "embed_out.weight" in k or "sentence_projection_layer.weight" in k or "lm_output_learned_bias" in k ): del state_dict[k] return state_dict @register_model_architecture('masked_lm', 'masked_lm') def base_architecture(args): args.dropout = getattr(args, 'dropout', 0.1) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.act_dropout = getattr(args, 'act_dropout', 0.0) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.share_encoder_input_output_embed = getattr(args, 'share_encoder_input_output_embed', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.num_segment = getattr(args, 'num_segment', 2) args.sentence_class_num = getattr(args, 'sentence_class_num', 2) args.sent_loss = getattr(args, 'sent_loss', False) args.apply_bert_init = getattr(args, 'apply_bert_init', False) args.activation_fn = getattr(args, 'activation_fn', 'relu') args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh') args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) @register_model_architecture('masked_lm', 'bert_base') def bert_base_architecture(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768) args.share_encoder_input_output_embed = getattr( args, 'share_encoder_input_output_embed', True) args.no_token_positional_embeddings = getattr( args, 'no_token_positional_embeddings', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', True) args.num_segment = getattr(args, 'num_segment', 2) args.encoder_layers = getattr(args, 'encoder_layers', 12) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 12) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 3072) args.sentence_class_num = getattr(args, 'sentence_class_num', 
2) args.sent_loss = getattr(args, 'sent_loss', True) args.apply_bert_init = getattr(args, 'apply_bert_init', True) args.activation_fn = getattr(args, 'activation_fn', 'gelu') args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh') args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True) base_architecture(args) @register_model_architecture('masked_lm', 'bert_large') def bert_large_architecture(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_layers = getattr(args, 'encoder_layers', 24) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) bert_base_architecture(args) @register_model_architecture('masked_lm', 'xlm_base') def xlm_architecture(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.share_encoder_input_output_embed = getattr( args, 'share_encoder_input_output_embed', True) args.no_token_positional_embeddings = getattr( args, 'no_token_positional_embeddings', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', True) args.num_segment = getattr(args, 'num_segment', 1) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.sent_loss = getattr(args, 'sent_loss', False) args.activation_fn = getattr(args, 'activation_fn', 'gelu') args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh') args.apply_bert_init = getattr(args, 'apply_bert_init', True) base_architecture(args)
15,107
41.798867
100
py
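A small sketch (made-up shapes) of the masked-position trick used in MaskedLMEncoder.forward above: when masked_tokens is given, only the masked positions are kept before the LM head projects back to the vocabulary, so logits are not computed for every token.

import torch
import torch.nn.functional as F

B, T, C, V = 2, 6, 8, 100
x = torch.randn(B, T, C)                            # encoder output, B x T x C
embed_weight = torch.randn(V, C)                    # (shared) output embedding matrix
masked_tokens = torch.zeros(B, T, dtype=torch.bool)
masked_tokens[0, 1] = masked_tokens[1, 4] = True    # two positions were masked in the input

x = x[masked_tokens, :]                             # keep only the masked positions: 2 x C
logits = F.linear(x, embed_weight)                  # 2 x V, fed to the masked LM loss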
RegularizedBN
RegularizedBN-main/fairseq/models/model_utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List, Optional import torch from torch import Tensor @torch.jit.script def script_skip_tensor_list(x: List[Tensor], mask): res = [xi[mask] if xi.size(0) == mask.size(0) else xi[:, mask] for xi in x] outputs = [] for i, t in enumerate(res): if t.numel() != 0: outputs.append(t) else: outputs.append(x[i]) return outputs @torch.jit.script def script_skip_tensor(x: Tensor, mask): # None case if x.size(0) == 0: return x res = x[mask] if x.size(0) == mask.size(0) else x[:, mask] if res.numel() == 0: return x else: return res @torch.jit.script def expand_2d_or_3d_tensor(x, trg_dim: int, padding_idx: int): """ Expand 2D/3D tensor on dim=1 """ if x is None: return None assert x.dim() == 2 or x.dim() == 3 assert trg_dim >= x.size(1), (trg_dim, x.size()) if trg_dim == x.size(1): return x dims = [x.size(0), trg_dim - x.size(1)] if x.dim() == 3: dims.append(x.size(2)) x = torch.cat([x, torch.zeros(dims).to(x).fill_(padding_idx)], 1) return x @torch.jit.script def coalesce(x: Optional[Tensor], y: Tensor) -> Tensor: return x if x is not None else y @torch.jit.script def fill_tensors(x: Optional[Tensor], mask, y: Optional[Tensor], padding_idx: int) -> Optional[Tensor]: """ Filling tensor x with y at masked positions (dim=0). """ if x is None or x.size()[0] == 0 or y is None: return x assert x.dim() == y.dim() and mask.size(0) == x.size(0) assert x.dim() == 2 or (x.dim() == 3 and x.size(2) == y.size(2)) n_selected = mask.sum() if n_selected == 0: return x assert n_selected == y.size(0) if n_selected == x.size(0): return y if x.size(1) < y.size(1): x = expand_2d_or_3d_tensor(x, y.size(1), padding_idx) x[mask] = y elif x.size(1) > y.size(1): x[mask] = torch.tensor(padding_idx).type_as(x) if x.dim() == 2: x[mask, :y.size(1)] = y else: x[mask, :y.size(1), :] = y else: x[mask] = y return x
2,335
24.67033
103
py
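A short usage sketch for expand_2d_or_3d_tensor from the model_utils.py source above, assuming the file is importable as fairseq.models.model_utils: a 2D batch is widened on dim=1 and the new columns are filled with the padding index.

import torch
from fairseq.models.model_utils import expand_2d_or_3d_tensor

x = torch.tensor([[5, 6, 7],
                  [8, 9, 1]])                       # 2 x 3
y = expand_2d_or_3d_tensor(x, trg_dim=5, padding_idx=1)
print(y.shape)                                      # torch.Size([2, 5]); the two new columns are all 1s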
RegularizedBN
RegularizedBN-main/fairseq/models/fairseq_encoder.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn from typing import Dict, List, NamedTuple, Optional from torch import Tensor EncoderOut = NamedTuple( "EncoderOut", [ ("encoder_out", Tensor), # T x B x C ("encoder_padding_mask", Optional[Tensor]), # B x T ("encoder_embedding", Optional[Tensor]), # B x T x C ("encoder_states", Optional[List[Tensor]]), # List[T x B x C] ("src_tokens", Optional[Tensor]), # B x T ("src_lengths", Optional[Tensor]), # B x 1 ], ) class FairseqEncoder(nn.Module): """Base class for encoders.""" def __init__(self, dictionary): super().__init__() self.dictionary = dictionary def forward(self, src_tokens, src_lengths=None, **kwargs): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): lengths of each source sentence of shape `(batch)` """ raise NotImplementedError def forward_torchscript(self, net_input: Dict[str, Tensor]): """A TorchScript-compatible version of forward. Encoders which use additional arguments may want to override this method for TorchScript compatibility. """ if torch.jit.is_scripting(): return self.forward( src_tokens=net_input["src_tokens"], src_lengths=net_input["src_lengths"], ) else: return self.forward_non_torchscript(net_input) @torch.jit.unused def forward_non_torchscript(self, net_input: Dict[str, Tensor]): encoder_input = { k: v for k, v in net_input.items() if k != "prev_output_tokens" } return self.forward(**encoder_input) def reorder_encoder_out(self, encoder_out, new_order): """ Reorder encoder output according to `new_order`. Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: `encoder_out` rearranged according to `new_order` """ raise NotImplementedError def max_positions(self): """Maximum input length supported by the encoder.""" return 1e6 # an arbitrary large number def upgrade_state_dict(self, state_dict): """Upgrade a (possibly old) state dict for new versions of fairseq.""" return state_dict def set_num_updates(self, num_updates): """State from trainer to pass along to model at every update.""" def _apply(m): if hasattr(m, 'set_num_updates') and m != self: m.set_num_updates(num_updates) self.apply(_apply)
2,955
31.130435
78
py
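A hypothetical toy subclass (not part of the repo) illustrating the contract defined by fairseq_encoder.py above: a concrete encoder supplies forward() and reorder_encoder_out(), and reordering selects along the batch dimension so beam search can shuffle hypotheses.

import torch.nn as nn
from fairseq.models import FairseqEncoder

class ToyEncoder(FairseqEncoder):
    def __init__(self, dictionary, embed_dim=8):
        super().__init__(dictionary)
        self.embed = nn.Embedding(len(dictionary), embed_dim, padding_idx=dictionary.pad())

    def forward(self, src_tokens, src_lengths=None, **kwargs):
        x = self.embed(src_tokens).transpose(0, 1)   # B x T x C -> T x B x C
        return {'encoder_out': x}

    def reorder_encoder_out(self, encoder_out, new_order):
        # reorder along the batch dimension (dim=1 for T x B x C)
        return {'encoder_out': encoder_out['encoder_out'].index_select(1, new_order)}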
RegularizedBN
RegularizedBN-main/fairseq/models/fconv.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( AdaptiveSoftmax, BeamableMM, FairseqDropout, GradMultiply, LearnedPositionalEmbedding, LinearizedConvolution, ) @register_model('fconv') class FConvModel(FairseqEncoderDecoderModel): """ A fully convolutional model, i.e. a convolutional encoder and a convolutional decoder, as described in `"Convolutional Sequence to Sequence Learning" (Gehring et al., 2017) <https://arxiv.org/abs/1705.03122>`_. Args: encoder (FConvEncoder): the encoder decoder (FConvDecoder): the decoder The Convolutional model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.fconv_parser :prog: """ @classmethod def hub_models(cls): def moses_subword(path): return { 'path': path, 'tokenizer': 'moses', 'bpe': 'subword_nmt', } return { 'conv.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt14.v2.en-fr.fconv-py.tar.bz2'), 'conv.wmt14.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-de.fconv-py.tar.bz2'), 'conv.wmt17.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt17.v2.en-de.fconv-py.tar.bz2'), } def __init__(self, encoder, decoder): super().__init__(encoder, decoder) self.encoder.num_attention_layers = sum(layer is not None for layer in decoder.attention) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-layers', type=str, metavar='EXPR', help='encoder layers [(dim, kernel_size), ...]') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-layers', type=str, metavar='EXPR', help='decoder layers [(dim, kernel_size), ...]') parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension') parser.add_argument('--decoder-attention', type=str, metavar='EXPR', help='decoder attention [True, ...]') parser.add_argument('--share-input-output-embed', action='store_true', help='share input and output embeddings (requires' ' --decoder-out-embed-dim and --decoder-embed-dim' ' to be equal)') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure that all args are properly defaulted (in case there are any new ones) base_architecture(args) encoder_embed_dict = None if args.encoder_embed_path: encoder_embed_dict = utils.parse_embedding(args.encoder_embed_path) utils.print_embed_overlap(encoder_embed_dict, task.source_dictionary) decoder_embed_dict = None if args.decoder_embed_path: decoder_embed_dict = utils.parse_embedding(args.decoder_embed_path) utils.print_embed_overlap(decoder_embed_dict, 
task.target_dictionary) encoder = FConvEncoder( dictionary=task.source_dictionary, embed_dim=args.encoder_embed_dim, embed_dict=encoder_embed_dict, convolutions=eval(args.encoder_layers), dropout=args.dropout, max_positions=args.max_source_positions, ) decoder = FConvDecoder( dictionary=task.target_dictionary, embed_dim=args.decoder_embed_dim, embed_dict=decoder_embed_dict, convolutions=eval(args.decoder_layers), out_embed_dim=args.decoder_out_embed_dim, attention=eval(args.decoder_attention), dropout=args.dropout, max_positions=args.max_target_positions, share_embed=args.share_input_output_embed, ) return FConvModel(encoder, decoder) class FConvEncoder(FairseqEncoder): """ Convolutional encoder consisting of `len(convolutions)` layers. Args: dictionary (~fairseq.data.Dictionary): encoding dictionary embed_dim (int, optional): embedding dimension embed_dict (str, optional): filename from which to load pre-trained embeddings max_positions (int, optional): maximum supported input sequence length convolutions (list, optional): the convolutional layer structure. Each list item `i` corresponds to convolutional layer `i`. Layers are given as ``(out_channels, kernel_width, [residual])``. Residual connections are added between layers when ``residual=1`` (which is the default behavior). dropout (float, optional): dropout to be applied before each conv layer """ def __init__( self, dictionary, embed_dim=512, embed_dict=None, max_positions=1024, convolutions=((512, 3),) * 20, dropout=0.1, ): super().__init__(dictionary) self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.num_attention_layers = None num_embeddings = len(dictionary) self.padding_idx = dictionary.pad() self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx) if embed_dict: self.embed_tokens = utils.load_embedding(embed_dict, self.dictionary, self.embed_tokens) self.embed_positions = PositionalEmbedding( max_positions, embed_dim, self.padding_idx, ) convolutions = extend_conv_spec(convolutions) in_channels = convolutions[0][0] self.fc1 = Linear(embed_dim, in_channels, dropout=dropout) self.projections = nn.ModuleList() self.convolutions = nn.ModuleList() self.residuals = [] layer_in_channels = [in_channels] for _, (out_channels, kernel_size, residual) in enumerate(convolutions): if residual == 0: residual_dim = out_channels else: residual_dim = layer_in_channels[-residual] self.projections.append(Linear(residual_dim, out_channels) if residual_dim != out_channels else None) if kernel_size % 2 == 1: padding = kernel_size // 2 else: padding = 0 self.convolutions.append( ConvTBC(in_channels, out_channels * 2, kernel_size, dropout=dropout, padding=padding) ) self.residuals.append(residual) in_channels = out_channels layer_in_channels.append(out_channels) self.fc2 = Linear(in_channels, embed_dim) def forward(self, src_tokens, src_lengths): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (tuple): a tuple with two elements, where the first element is the last encoder layer's output and the second element is the same quantity summed with the input embedding (used for attention). The shape of both tensors is `(batch, src_len, embed_dim)`. 
- **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_tokens(src_tokens) + self.embed_positions(src_tokens) x = self.dropout_module(x) input_embedding = x # project to size of convolution x = self.fc1(x) # used to mask padding in input encoder_padding_mask = src_tokens.eq(self.padding_idx).t() # -> T x B if not encoder_padding_mask.any(): encoder_padding_mask = None # B x T x C -> T x B x C x = x.transpose(0, 1) residuals = [x] # temporal convolutions for proj, conv, res_layer in zip(self.projections, self.convolutions, self.residuals): if res_layer > 0: residual = residuals[-res_layer] residual = residual if proj is None else proj(residual) else: residual = None if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0) x = self.dropout_module(x) if conv.kernel_size[0] % 2 == 1: # padding is implicit in the conv x = conv(x) else: padding_l = (conv.kernel_size[0] - 1) // 2 padding_r = conv.kernel_size[0] // 2 x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r)) x = conv(x) x = F.glu(x, dim=2) if residual is not None: x = (x + residual) * math.sqrt(0.5) residuals.append(x) # T x B x C -> B x T x C x = x.transpose(1, 0) # project back to size of embedding x = self.fc2(x) if encoder_padding_mask is not None: encoder_padding_mask = encoder_padding_mask.t() # -> B x T x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0) # scale gradients (this only affects backward, not forward) x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers)) # add output to input embedding for attention y = (x + input_embedding) * math.sqrt(0.5) return { 'encoder_out': (x, y), 'encoder_padding_mask': encoder_padding_mask, # B x T } def reorder_encoder_out(self, encoder_out, new_order): if encoder_out['encoder_out'] is not None: encoder_out['encoder_out'] = ( encoder_out['encoder_out'][0].index_select(0, new_order), encoder_out['encoder_out'][1].index_select(0, new_order), ) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(0, new_order) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" return self.embed_positions.max_positions class AttentionLayer(nn.Module): def __init__(self, conv_channels, embed_dim, bmm=None): super().__init__() # projects from output of convolution to embedding dimension self.in_projection = Linear(conv_channels, embed_dim) # projects from embedding dimension to convolution size self.out_projection = Linear(embed_dim, conv_channels) self.bmm = bmm if bmm is not None else torch.bmm def forward(self, x, target_embedding, encoder_out, encoder_padding_mask): residual = x # attention x = (self.in_projection(x) + target_embedding) * math.sqrt(0.5) x = self.bmm(x, encoder_out[0]) # don't attend over padding if encoder_padding_mask is not None: x = x.float().masked_fill( encoder_padding_mask.unsqueeze(1), float('-inf') ).type_as(x) # FP16 support: cast to float and back # softmax over last dim sz = x.size() x = F.softmax(x.view(sz[0] * sz[1], sz[2]), dim=1) x = x.view(sz) attn_scores = x x = self.bmm(x, encoder_out[1]) # scale attention output (respecting potentially different lengths) s = encoder_out[1].size(1) if encoder_padding_mask is None: x = x * (s * math.sqrt(1.0 / s)) else: s = s - encoder_padding_mask.type_as(x).sum(dim=1, keepdim=True) # exclude padding s = s.unsqueeze(-1) x = x * (s * s.rsqrt()) # project back x = 
(self.out_projection(x) + residual) * math.sqrt(0.5) return x, attn_scores def make_generation_fast_(self, beamable_mm_beam_size=None, **kwargs): """Replace torch.bmm with BeamableMM.""" if beamable_mm_beam_size is not None: del self.bmm self.add_module('bmm', BeamableMM(beamable_mm_beam_size)) class FConvDecoder(FairseqIncrementalDecoder): """Convolutional decoder""" def __init__( self, dictionary, embed_dim=512, embed_dict=None, out_embed_dim=256, max_positions=1024, convolutions=((512, 3),) * 20, attention=True, dropout=0.1, share_embed=False, positional_embeddings=True, adaptive_softmax_cutoff=None, adaptive_softmax_dropout=0., ): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([2])) self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.need_attn = True convolutions = extend_conv_spec(convolutions) in_channels = convolutions[0][0] if isinstance(attention, bool): # expand True into [True, True, ...] and do the same with False attention = [attention] * len(convolutions) if not isinstance(attention, list) or len(attention) != len(convolutions): raise ValueError('Attention is expected to be a list of booleans of ' 'length equal to the number of layers.') num_embeddings = len(dictionary) padding_idx = dictionary.pad() self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) if embed_dict: self.embed_tokens = utils.load_embedding(embed_dict, self.dictionary, self.embed_tokens) self.embed_positions = PositionalEmbedding( max_positions, embed_dim, padding_idx, ) if positional_embeddings else None self.fc1 = Linear(embed_dim, in_channels, dropout=dropout) self.projections = nn.ModuleList() self.convolutions = nn.ModuleList() self.attention = nn.ModuleList() self.residuals = [] layer_in_channels = [in_channels] for i, (out_channels, kernel_size, residual) in enumerate(convolutions): if residual == 0: residual_dim = out_channels else: residual_dim = layer_in_channels[-residual] self.projections.append(Linear(residual_dim, out_channels) if residual_dim != out_channels else None) self.convolutions.append( LinearizedConv1d(in_channels, out_channels * 2, kernel_size, padding=(kernel_size - 1), dropout=dropout) ) self.attention.append(AttentionLayer(out_channels, embed_dim) if attention[i] else None) self.residuals.append(residual) in_channels = out_channels layer_in_channels.append(out_channels) self.adaptive_softmax = None self.fc2 = self.fc3 = None if adaptive_softmax_cutoff is not None: assert not share_embed self.adaptive_softmax = AdaptiveSoftmax(num_embeddings, in_channels, adaptive_softmax_cutoff, dropout=adaptive_softmax_dropout) else: self.fc2 = Linear(in_channels, out_embed_dim) if share_embed: assert out_embed_dim == embed_dim, \ "Shared embed weights implies same dimensions " \ " out_embed_dim={} vs embed_dim={}".format(out_embed_dim, embed_dim) self.fc3 = nn.Linear(out_embed_dim, num_embeddings) self.fc3.weight = self.embed_tokens.weight else: self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout) def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused): if encoder_out is not None: encoder_padding_mask = encoder_out['encoder_padding_mask'] encoder_out = encoder_out['encoder_out'] # split and transpose encoder outputs encoder_a, encoder_b = self._split_encoder_out(encoder_out, incremental_state) if self.embed_positions is not None: pos_embed = self.embed_positions(prev_output_tokens, incremental_state) else: pos_embed = 0 if incremental_state is not None: 
prev_output_tokens = prev_output_tokens[:, -1:] x = self._embed_tokens(prev_output_tokens, incremental_state) # embed tokens and combine with positional embeddings x += pos_embed x = self.dropout_module(x) target_embedding = x # project to size of convolution x = self.fc1(x) # B x T x C -> T x B x C x = self._transpose_if_training(x, incremental_state) # temporal convolutions avg_attn_scores = None num_attn_layers = len(self.attention) residuals = [x] for proj, conv, attention, res_layer in zip(self.projections, self.convolutions, self.attention, self.residuals): if res_layer > 0: residual = residuals[-res_layer] residual = residual if proj is None else proj(residual) else: residual = None x = self.dropout_module(x) x = conv(x, incremental_state) x = F.glu(x, dim=2) # attention if attention is not None: x = self._transpose_if_training(x, incremental_state) x, attn_scores = attention(x, target_embedding, (encoder_a, encoder_b), encoder_padding_mask) if not self.training and self.need_attn: attn_scores = attn_scores / num_attn_layers if avg_attn_scores is None: avg_attn_scores = attn_scores else: avg_attn_scores.add_(attn_scores) x = self._transpose_if_training(x, incremental_state) # residual if residual is not None: x = (x + residual) * math.sqrt(0.5) residuals.append(x) # T x B x C -> B x T x C x = self._transpose_if_training(x, incremental_state) # project back to size of vocabulary if not using adaptive softmax if self.fc2 is not None and self.fc3 is not None: x = self.fc2(x) x = self.dropout_module(x) x = self.fc3(x) return x, avg_attn_scores def reorder_incremental_state(self, incremental_state, new_order): super().reorder_incremental_state(incremental_state, new_order) encoder_out = utils.get_incremental_state(self, incremental_state, 'encoder_out') if encoder_out is not None: encoder_out = tuple(eo.index_select(0, new_order) for eo in encoder_out) utils.set_incremental_state(self, incremental_state, 'encoder_out', encoder_out) def max_positions(self): """Maximum output length supported by the decoder.""" return self.embed_positions.max_positions if self.embed_positions is not None else float('inf') def upgrade_state_dict(self, state_dict): if utils.item(state_dict.get('decoder.version', torch.Tensor([1]))[0]) < 2: # old models use incorrect weight norm dimension for i, conv in enumerate(self.convolutions): # reconfigure weight norm nn.utils.remove_weight_norm(conv) self.convolutions[i] = nn.utils.weight_norm(conv, dim=0) state_dict['decoder.version'] = torch.Tensor([1]) return state_dict def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def _embed_tokens(self, tokens, incremental_state): if incremental_state is not None: # keep only the last token for incremental forward pass tokens = tokens[:, -1:] return self.embed_tokens(tokens) def _split_encoder_out(self, encoder_out, incremental_state): """Split and transpose encoder outputs. This is cached when doing incremental inference. 
""" cached_result = utils.get_incremental_state(self, incremental_state, 'encoder_out') if cached_result is not None: return cached_result # transpose only once to speed up attention layers encoder_a, encoder_b = encoder_out encoder_a = encoder_a.transpose(1, 2).contiguous() result = (encoder_a, encoder_b) if incremental_state is not None: utils.set_incremental_state(self, incremental_state, 'encoder_out', result) return result def _transpose_if_training(self, x, incremental_state): if incremental_state is None: x = x.transpose(0, 1) return x def extend_conv_spec(convolutions): """ Extends convolutional spec that is a list of tuples of 2 or 3 parameters (kernel size, dim size and optionally how many layers behind to look for residual) to default the residual propagation param if it is not specified """ extended = [] for spec in convolutions: if len(spec) == 3: extended.append(spec) elif len(spec) == 2: extended.append(spec + (1,)) else: raise Exception('invalid number of parameters in convolution spec ' + str(spec) + '. expected 2 or 3') return tuple(extended) def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, 0, 0.1) nn.init.constant_(m.weight[padding_idx], 0) return m def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx): m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx) nn.init.normal_(m.weight, 0, 0.1) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, dropout=0.): """Weight-normalized Linear layer (input: N x T x C)""" m = nn.Linear(in_features, out_features) nn.init.normal_(m.weight, mean=0, std=math.sqrt((1 - dropout) / in_features)) nn.init.constant_(m.bias, 0) return nn.utils.weight_norm(m) def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0., **kwargs): """Weight-normalized Conv1d layer optimized for decoding""" m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs) std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) nn.init.normal_(m.weight, mean=0, std=std) nn.init.constant_(m.bias, 0) return nn.utils.weight_norm(m, dim=2) def ConvTBC(in_channels, out_channels, kernel_size, dropout=0., **kwargs): """Weight-normalized Conv1d layer""" from fairseq.modules import ConvTBC m = ConvTBC(in_channels, out_channels, kernel_size, **kwargs) std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) nn.init.normal_(m.weight, mean=0, std=std) nn.init.constant_(m.bias, 0) return nn.utils.weight_norm(m, dim=2) @register_model_architecture('fconv', 'fconv') def base_architecture(args): args.dropout = getattr(args, 'dropout', 0.1) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_layers = getattr(args, 'encoder_layers', '[(512, 3)] * 20') args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_layers = getattr(args, 'decoder_layers', '[(512, 3)] * 20') args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256) args.decoder_attention = getattr(args, 'decoder_attention', 'True') args.share_input_output_embed = getattr(args, 'share_input_output_embed', False) @register_model_architecture('fconv', 'fconv_iwslt_de_en') def fconv_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256) args.encoder_layers = getattr(args, 
'encoder_layers', '[(256, 3)] * 4') args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 256) args.decoder_layers = getattr(args, 'decoder_layers', '[(256, 3)] * 3') args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256) base_architecture(args) @register_model_architecture('fconv', 'fconv_wmt_en_ro') def fconv_wmt_en_ro(args): args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512) base_architecture(args) @register_model_architecture('fconv', 'fconv_wmt_en_de') def fconv_wmt_en_de(args): convs = '[(512, 3)] * 9' # first 9 layers have 512 units convs += ' + [(1024, 3)] * 4' # next 4 layers have 1024 units convs += ' + [(2048, 1)] * 2' # final 2 layers use 1x1 convolutions args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768) args.encoder_layers = getattr(args, 'encoder_layers', convs) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 768) args.decoder_layers = getattr(args, 'decoder_layers', convs) args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512) base_architecture(args) @register_model_architecture('fconv', 'fconv_wmt_en_fr') def fconv_wmt_en_fr(args): convs = '[(512, 3)] * 6' # first 6 layers have 512 units convs += ' + [(768, 3)] * 4' # next 4 layers have 768 units convs += ' + [(1024, 3)] * 3' # next 3 layers have 1024 units convs += ' + [(2048, 1)] * 1' # next 1 layer uses 1x1 convolutions convs += ' + [(4096, 1)] * 1' # final 1 layer uses 1x1 convolutions args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768) args.encoder_layers = getattr(args, 'encoder_layers', convs) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 768) args.decoder_layers = getattr(args, 'decoder_layers', convs) args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512) base_architecture(args)
27,685
40.138187
127
py
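A self-contained sketch (plain nn.Conv1d instead of fairseq's weight-normalized ConvTBC) of the per-layer pattern in the FConv encoder above: the convolution produces 2*C channels, a GLU gates them back down to C, and the residual sum is scaled by sqrt(0.5) to keep the variance roughly constant.

import math
import torch
import torch.nn as nn
import torch.nn.functional as F

B, T, C, k = 2, 10, 16, 3
x = torch.randn(B, C, T)                              # nn.Conv1d expects batch x channels x time
conv = nn.Conv1d(C, 2 * C, kernel_size=k, padding=k // 2)
residual = x
y = F.glu(conv(x), dim=1)                             # gated linear unit: 2*C channels -> C channels
y = (y + residual) * math.sqrt(0.5)                   # scaled residual connection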
RegularizedBN
RegularizedBN-main/fairseq/models/lightconv.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( AdaptiveSoftmax, DynamicConv, FairseqDropout, LayerNorm, PositionalEmbedding, LightweightConv, MultiheadAttention, ) from fairseq.modules import NormSelect @register_model('lightconv') class LightConvModel(FairseqEncoderDecoderModel): """ LightConv and DynamicConv model from `"Pay Less Attention with Lightweight and Dynamic Convolutions" (Wu, et al, 2019) <https://openreview.net/pdf?id=SkVhlh09tX>`_. To use LightConv please set ``--encoder-conv-type lightweight --decoder-conv-type lightweight`` To use DynamicConv please set ``--encoder-conv-type dynamic --decoder-conv-type dynamic`` Args: encoder (LightConvEncoder): the encoder decoder (LightConvDecoder): the decoder The LightConv model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.lightconv_parser :prog: """ @classmethod def hub_models(cls): # fmt: off def moses_subword(path): return { 'path': path, 'tokenizer': 'moses', 'bpe': 'subword_nmt', } return { 'lightconv.no_glu.iwslt14.de-en': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/iwslt14.de-en.lightconv.tar.gz'), 'dynamicconv.no_glu.iwslt14.de-en': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/iwslt14.de-en.dynamicconv.tar.gz'), 'lightconv.no_glu.wmt16.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.lightconv.tar.gz'), 'dynamicconv.no_glu.wmt16.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.dynamicconv.tar.gz'), 'lightconv.glu.wmt16.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.lightconv-glu.tar.gz'), 'dynamicconv.glu.wmt16.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.dynamicconv-glu.tar.gz'), 'lightconv.glu.wmt17.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.lightconv-glu.tar.gz'), 'dynamicconv.glu.wmt17.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.dynamicconv-glu.tar.gz'), 'lightconv.glu.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt14.en-fr.joined-dict.lightconv-glu.tar.gz'), 'dynamicconv.glu.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt14.en-fr.joined-dict.dynamicconv-glu.tar.gz'), 'lightconv.glu.wmt17.zh-en': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt17.zh-en.lightconv-glu.tar.gz'), 'dynamicconv.glu.wmt17.zh-en': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt17.zh-en.dynamicconv-glu.tar.gz'), } # fmt: on def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', 
help='dropout probability for attention weights') parser.add_argument('--relu-dropout', type=float, metavar='D', help='dropout probability after ReLU in FFN') parser.add_argument('--input-dropout', type=float, metavar='D', help='dropout probability of the inputs') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-conv-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads or LightConv/DynamicConv heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-conv-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads or LightConv/DynamicConv heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') """LightConv and DynamicConv arguments""" parser.add_argument('--encoder-kernel-size-list', type=lambda x: options.eval_str_list(x, int), help='list of kernel size (default: "[3,7,15,31,31,31,31]")') parser.add_argument('--decoder-kernel-size-list', type=lambda x: options.eval_str_list(x, int), help='list of kernel size (default: "[3,7,15,31,31,31]")') parser.add_argument('--encoder-glu', type=options.eval_bool, help='glu after in proj') parser.add_argument('--decoder-glu', type=options.eval_bool, help='glu after in proj') parser.add_argument('--encoder-conv-type', default='dynamic', type=str, choices=['dynamic', 'lightweight'], help='type of convolution') parser.add_argument('--decoder-conv-type', default='dynamic', type=str, choices=['dynamic', 'lightweight'], help='type of convolution') parser.add_argument('--weight-softmax', default=True, type=options.eval_bool) parser.add_argument('--weight-dropout', type=float, metavar='D', help='dropout probability for conv weights') parser.add_argument('--norm', type=str, default='layer_1', help='normalization module') @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = 1024 if not hasattr(args, 'max_target_positions'): args.max_target_positions = 1024 src_dict, tgt_dict = task.source_dictionary, task.target_dictionary def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise RuntimeError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise RuntimeError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise RuntimeError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) encoder = LightConvEncoder(args, src_dict, encoder_embed_tokens) decoder = LightConvDecoder(args, tgt_dict, decoder_embed_tokens) return LightConvModel(encoder, decoder) class LightConvEncoder(FairseqEncoder): """ LightConv encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`LightConvEncoderLayer`. 
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__) embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layers = nn.ModuleList([]) self.layers.extend([ LightConvEncoderLayer(args, kernel_size=args.encoder_kernel_size_list[i]) for i in range(args.encoder_layers) ]) self.register_buffer('version', torch.Tensor([2])) self.normalize = args.encoder_normalize_before if self.normalize: self.layer_norm = LayerNorm(embed_dim) def forward(self, src_tokens, **unused): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask) if self.normalize: x = self.layer_norm(x) return { 'encoder_out': x, # T x B x C 'encoder_padding_mask': encoder_padding_mask, # B x T } def reorder_encoder_out(self, encoder_out, new_order): """ Reorder encoder output according to *new_order*. Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out['encoder_out'] is not None: encoder_out['encoder_out'] = \ encoder_out['encoder_out'].index_select(1, new_order) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(0, new_order) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions) class LightConvDecoder(FairseqIncrementalDecoder): """ LightConv decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`LightConvDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs. 
Default: ``False`` """ def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False, final_norm=True): super().__init__(dictionary) self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__) self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None self.embed_positions = PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layers = nn.ModuleList([]) self.layers.extend([ LightConvDecoderLayer(args, no_encoder_attn, kernel_size=args.decoder_kernel_size_list[i]) for i in range(args.decoder_layers) ]) self.adaptive_softmax = None self.project_out_dim = Linear(embed_dim, output_embed_dim, bias=False) \ if embed_dim != output_embed_dim and not args.tie_adaptive_weights else None if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), output_embed_dim)) nn.init.normal_(self.embed_out, mean=0, std=output_embed_dim ** -0.5) self.register_buffer('version', torch.Tensor([2])) self.normalize = args.decoder_normalize_before and final_norm if self.normalize: self.layer_norm = LayerNorm(embed_dim) def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the last decoder layer's output of shape `(batch, tgt_len, vocab)` - the last decoder layer's attention weights of shape `(batch, tgt_len, src_len)` """ # embed positions positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out['encoder_out'] if encoder_out is not None else None, encoder_out['encoder_padding_mask'] if encoder_out is not None else None, incremental_state, ) inner_states.append(x) if self.normalize: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = 
self.project_out_dim(x) if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: x = F.linear(x, self.embed_tokens.weight) else: x = F.linear(x, self.embed_out) return x, {'attn': attn, 'inner_states': inner_states} def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions) def buffered_future_mask(self, tensor): dim = tensor.size(0) if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device: self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._future_mask.size(0) < dim: self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1) return self._future_mask[:dim, :dim] class LightConvEncoderLayer(nn.Module): """Encoder layer block. Args: args (argparse.Namespace): parsed command-line arguments kernel_size: kernel size of the convolution """ def __init__(self, args, kernel_size=0): super().__init__() def extract_id(s): cand = s.split('/') for c in cand: if(c.find('cnn')>=0): return c print("error path!") exit() return 'error' self.prefix = extract_id(args.save_dir) self.embed_dim = args.encoder_embed_dim self.conv_dim = args.encoder_conv_dim padding_l = kernel_size // 2 if kernel_size % 2 == 1 else ((kernel_size - 1) // 2, kernel_size // 2) if args.encoder_glu: self.linear1 = Linear(self.embed_dim, 2*self.conv_dim) self.act = nn.GLU() else: self.linear1 = Linear(self.embed_dim, self.conv_dim) self.act = None if args.encoder_conv_type == 'lightweight': self.conv = LightweightConv(self.conv_dim, kernel_size, padding_l=padding_l, weight_softmax=args.weight_softmax, num_heads=args.encoder_attention_heads, weight_dropout=args.weight_dropout) elif args.encoder_conv_type == 'dynamic': self.conv = DynamicConv(self.conv_dim, kernel_size, padding_l=padding_l, weight_softmax=args.weight_softmax, num_heads=args.encoder_attention_heads, weight_dropout=args.weight_dropout) else: raise NotImplementedError self.linear2 = Linear(self.conv_dim, self.embed_dim) self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__) self.relu_dropout_module = FairseqDropout(args.relu_dropout, module_name=self.__class__.__name__) self.input_dropout_module = FairseqDropout(args.input_dropout, module_name=self.__class__.__name__) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) #self.layer_norms = nn.ModuleList([LayerNorm(self.embed_dim) for _ in range(2)]) self.layer_norms = nn.ModuleList([NormSelect(args.norm, self.embed_dim, prefix=self.prefix) for _ in range(2)]) def forward(self, x, encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(0, x, encoder_padding_mask, before=True) x = self.input_dropout_module(x) x = self.linear1(x) if self.act is not None: x = self.act(x) if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.transpose(0, 1).unsqueeze(2), 0) x = self.conv(x) x = self.linear2(x) x = self.dropout_module(x) x = residual + x x = self.maybe_layer_norm(0, x, encoder_padding_mask, after=True) residual = x x = self.maybe_layer_norm(1, x, encoder_padding_mask, before=True) x = F.relu(self.fc1(x)) x = self.relu_dropout_module(x) x = self.fc2(x) x = self.dropout_module(x) x = residual + x x = self.maybe_layer_norm(1, x, encoder_padding_mask,after=True) return x def maybe_layer_norm(self, i, x, encoder_padding_mask, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return self.layer_norms[i](x, encoder_padding_mask) else: return x def extra_repr(self): return 'dropout={}, relu_dropout={}, input_dropout={}, normalize_before={}'.format( self.dropout_module.p, self.relu_dropout_module.p, self.input_dropout_module.p, self.normalize_before) class LightConvDecoderLayer(nn.Module): """Decoder layer block. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs. Default: ``False`` kernel_size: kernel size of the convolution """ def __init__(self, args, no_encoder_attn=False, kernel_size=0): super().__init__() self.embed_dim = args.decoder_embed_dim self.conv_dim = args.decoder_conv_dim if args.decoder_glu: self.linear1 = Linear(self.embed_dim, 2*self.conv_dim) self.act = nn.GLU() else: self.linear1 = Linear(self.embed_dim, self.conv_dim) self.act = None if args.decoder_conv_type == 'lightweight': self.conv = LightweightConv(self.conv_dim, kernel_size, padding_l=kernel_size-1, weight_softmax=args.weight_softmax, num_heads=args.decoder_attention_heads, weight_dropout=args.weight_dropout) elif args.decoder_conv_type == 'dynamic': self.conv = DynamicConv(self.conv_dim, kernel_size, padding_l=kernel_size-1, weight_softmax=args.weight_softmax, num_heads=args.decoder_attention_heads, weight_dropout=args.weight_dropout) else: raise NotImplementedError self.linear2 = Linear(self.conv_dim, self.embed_dim) self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__) self.relu_dropout_module = FairseqDropout(args.relu_dropout, module_name=self.__class__.__name__) self.input_dropout_module = FairseqDropout(args.input_dropout, module_name=self.__class__.__name__) self.normalize_before = args.decoder_normalize_before self.conv_layer_norm = LayerNorm(self.embed_dim) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True, ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) self.need_attn = True def forward(self, x, encoder_out, encoder_padding_mask, incremental_state, prev_conv_state=None, prev_attn_state=None, conv_mask=None, conv_padding_mask=None): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding 
elements are indicated by ``1``. Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.conv_layer_norm, x, before=True) if prev_conv_state is not None: if incremental_state is None: incremental_state = {} self.conv._set_input_buffer(incremental_state, prev_conv_state) x = self.input_dropout_module(x) x = self.linear1(x) if self.act is not None: x = self.act(x) x = self.conv(x, incremental_state=incremental_state) x = self.linear2(x) x = self.dropout_module(x) x = residual + x x = self.maybe_layer_norm(self.conv_layer_norm, x, after=True) attn = None if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x = self.dropout_module(x) x = residual + x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = F.relu(self.fc1(x)) x = self.relu_dropout_module(x) x = self.fc2(x) x = self.dropout_module(x) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x, attn def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def extra_repr(self): return 'dropout={}, relu_dropout={}, input_dropout={}, normalize_before={}'.format( self.dropout_module.p, self.relu_dropout_module.p, self.input_dropout_module.p, self.normalize_before) def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.) 
return m @register_model_architecture('lightconv', 'lightconv') def base_architecture(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 7) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.relu_dropout = getattr(args, 'relu_dropout', 0.) args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) args.encoder_conv_dim = getattr(args, 'encoder_conv_dim', args.encoder_embed_dim) args.decoder_conv_dim = getattr(args, 'decoder_conv_dim', args.decoder_embed_dim) args.encoder_kernel_size_list = getattr(args, 'encoder_kernel_size_list', [3, 7, 15, 31, 31, 31, 31]) args.decoder_kernel_size_list = getattr(args, 'decoder_kernel_size_list', [3, 7, 15, 31, 31, 31]) if len(args.encoder_kernel_size_list) == 1: args.encoder_kernel_size_list = args.encoder_kernel_size_list * args.encoder_layers if len(args.decoder_kernel_size_list) == 1: args.decoder_kernel_size_list = args.decoder_kernel_size_list * args.decoder_layers assert len(args.encoder_kernel_size_list) == args.encoder_layers, "encoder_kernel_size_list doesn't match encoder_layers" assert len(args.decoder_kernel_size_list) == args.decoder_layers, "decoder_kernel_size_list doesn't match decoder_layers" args.encoder_glu = getattr(args, 'encoder_glu', True) args.decoder_glu = getattr(args, 'decoder_glu', True) args.input_dropout = getattr(args, 'input_dropout', 0.1) args.weight_dropout = getattr(args, 'weight_dropout', args.attention_dropout) @register_model_architecture('lightconv', 'lightconv_iwslt_de_en') def lightconv_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 7) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 
'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.weight_dropout = getattr(args, 'weight_dropout', 0.1) args.encoder_glu = getattr(args, 'encoder_glu', False) args.decoder_glu = getattr(args, 'decoder_glu', False) args.input_dropout = getattr(args, 'input_dropout', 0.0) base_architecture(args) @register_model_architecture('lightconv', 'lightconv_wmt_en_de') def lightconv_wmt_en_de(args): base_architecture(args) @register_model_architecture('lightconv', 'lightconv_wmt_en_de_big') def lightconv_wmt_en_de_big(args): args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.dropout = getattr(args, 'dropout', 0.3) base_architecture(args) @register_model_architecture('lightconv', 'lightconv_wmt_en_fr_big') def lightconv_wmt_en_fr_big(args): args.dropout = getattr(args, 'dropout', 0.1) lightconv_wmt_en_de_big(args) @register_model_architecture('lightconv', 'lightconv_wmt_zh_en_big') def lightconv_wmt_zh_en_big(args): args.dropout = getattr(args, 'dropout', 0.2) args.attention_dropout = getattr(args, 'attention_dropout', 0.2) args.weight_dropout = getattr(args, 'weight_dropout', 0.2) lightconv_wmt_en_de_big(args)
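# ---------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original fairseq file). The
# @register_model_architecture functions above all follow one recipe: fill any
# hyper-parameter the user did not set via getattr(), then fall through to a
# broader preset so the remaining defaults get applied. The name
# 'lightconv_toy' and the reduced sizes below are hypothetical and exist only
# to show that recipe.
# ---------------------------------------------------------------------------
@register_model_architecture('lightconv', 'lightconv_toy')
def lightconv_toy(args):
    # every assignment keeps a user-supplied value and only fills gaps
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256)
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 256)
    args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 512)
    args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 512)
    # reuse the IWSLT preset above for everything left unspecified
    lightconv_iwslt_de_en(args)

# Example invocation (data path is a placeholder), choosing the convolution
# type as described in the LightConvModel docstring:
#   fairseq-train <data-bin> --arch lightconv_iwslt_de_en \
#       --encoder-conv-type lightweight --decoder-conv-type lightweight \
#       --norm layer_1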
37,289
45.6125
165
py
RegularizedBN
RegularizedBN-main/fairseq/models/transformer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Any, Dict, List, Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture, ) from fairseq.models.fairseq_encoder import EncoderOut from fairseq.modules import ( AdaptiveSoftmax, FairseqDropout, LayerDropModuleList, LayerNorm, PositionalEmbedding, SinusoidalPositionalEmbedding, TransformerDecoderLayer, TransformerEncoderLayer, ) from fairseq.modules import NormSelect from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_ from torch import Tensor from scipy import io import numpy as np DEFAULT_MAX_SOURCE_POSITIONS = 1024 DEFAULT_MAX_TARGET_POSITIONS = 1024 from fairseq.models.LSUV import LSUVinit @register_model("transformer") class TransformerModel(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ @classmethod def hub_models(cls): # fmt: off def moses_subword(path): return { 'path': path, 'tokenizer': 'moses', 'bpe': 'subword_nmt', } def moses_fastbpe(path): return { 'path': path, 'tokenizer': 'moses', 'bpe': 'fastbpe', } return { 'transformer.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-fr.joined-dict.transformer.tar.bz2'), 'transformer.wmt16.en-de': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt16.en-de.joined-dict.transformer.tar.bz2', 'transformer.wmt18.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt18.en-de.ensemble.tar.gz'), 'transformer.wmt19.en-de': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz'), 'transformer.wmt19.en-ru': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz'), 'transformer.wmt19.de-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz'), 'transformer.wmt19.ru-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz'), 'transformer.wmt19.en-de.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.single_model.tar.gz'), 'transformer.wmt19.en-ru.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.single_model.tar.gz'), 'transformer.wmt19.de-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.single_model.tar.gz'), 'transformer.wmt19.ru-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.single_model.tar.gz'), } # fmt: on def __init__(self, args, encoder, decoder): super().__init__(encoder, decoder) self.args = args self.supports_align_args = True self.lsuv = 1 @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') 
parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--decoder-output-dim', type=int, metavar='N', help='decoder output dimension (extra linear layer ' 'if different from decoder embed dim') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') parser.add_argument('--layernorm-embedding', action='store_true', help='add layernorm to embedding') parser.add_argument('--no-scale-embedding', action='store_true', help='if True, dont scale embeddings') # args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019) parser.add_argument('--no-cross-attention', default=False, action='store_true', help='do not perform cross-attention') parser.add_argument('--cross-self-attention', default=False, action='store_true', help='perform cross+self-attention') # args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019) parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0, help='LayerDrop probability for encoder') parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0, help='LayerDrop probability for decoder') parser.add_argument('--encoder-layers-to-keep', default=None, help='which layers to *keep* when pruning as a comma-separated list') parser.add_argument('--decoder-layers-to-keep', default=None, help='which layers to *keep* when pruning as a comma-separated list') # args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020) parser.add_argument('--quant-noise-pq', type=float, metavar='D', default=0, help='iterative PQ quantization noise at training time') parser.add_argument('--quant-noise-pq-block-size', type=int, metavar='D', default=8, help='block size of quantization noise at training time') parser.add_argument('--quant-noise-scalar', type=float, metavar='D', default=0, help='scalar quantization noise and scalar quantization at training time') parser.add_argument('--encoder-att-norm', type=str, default='layer_1', metavar='STR', help='encoder att normalization') parser.add_argument('--encoder-in-ffn-norm', type=str, default='identity', metavar='STR', help='encoder inffn normalization') parser.add_argument('--encoder-ffn-norm', type=str, default='layer_1', metavar='STR', help='encoder ffn normalization') parser.add_argument('--encoder-pre-final-norm', type=str, default='layer_1', metavar='STR', help='encoder pre-layer final norm normalization') parser.add_argument('--decoder-att-norm', type=str, default='layer_1', metavar='STR', help='encoder att normalization') parser.add_argument('--decoder-cross-att-norm', type=str, default='layer_1', metavar='STR', help='cross att normalization') parser.add_argument('--decoder-in-ffn-norm', type=str, default='identity', metavar='STR', help='decoder inffn normalization') parser.add_argument('--decoder-ffn-norm', type=str, default='layer_1', metavar='STR', help='encoder ffn normalization') parser.add_argument('--encoder-fc1', type=str, default='linear', metavar='STR', help='encoder fc1') parser.add_argument('--encoder-fc2', type=str, default='linear', metavar='STR', help='encoder fc2') parser.add_argument('--decoder-fc1', type=str, default='linear', metavar='STR', help='decoder fc1') parser.add_argument('--decoder-fc2', type=str, default='linear', metavar='STR', help='decoder fc2') parser.add_argument('--encoder-activation', type=str, default='relu', metavar='STR', help='encoder ffn activation') parser.add_argument('--with-seq', type=int, default=1,help='normalize along sequence') parser.add_argument('--same-bias', type=int, default=1,help='normalize with same affine parameters') 
parser.add_argument('--add-wn', type=int, default=0,help='WN') parser.add_argument('--add-cwn', type=int, default=0,help='CWN') parser.add_argument('--no-wn', type=int, default=0,help='No WN or CWN') parser.add_argument('--wn-adjust', type=int, default=0, help='wn adjust') parser.add_argument('--cwn-adjust', type=int, default=0, help='cwn adjust') parser.add_argument('--NScale', type=float, metavar='D', default=1, help='cwn scale parameter') parser.add_argument('--record-norminfo', type=int, default=0, help='norminfo') parser.add_argument('--pad-right', type=int, default=1, help='padding left or not') parser.add_argument('--orth-penalty', type=float, default=0, help='orthorgonal penalty of FFN') parser.add_argument('--valid-inconsistency', type=int, default=0, help='add inconsistent loss in validation') parser.add_argument('--normalize-q', type=int, default=0, help='normalize q in encoder self attention') parser.add_argument('--normalize-k', type=int, default=0, help='normalize k in encoder self attention') parser.add_argument('--normalize-v', type=int, default=0, help='normalize v in encoder self attention') parser.add_argument('--g0', type=float, default=10, help='gain value in self attention') parser.add_argument('--fix-g0', type=int, default=0, help='fix g0 in self attention') parser.add_argument('--record-attn-weight', type=int, default=0, help='record attn weight in encoder self attention') parser.add_argument('--record-residual-proportion', type=int, default=0, help='record residual proportion') parser.add_argument('--record-weight-norm', type=int, default=0, help='record weight norm or not') parser.add_argument('--forward-probe-position', type=str, default="none", help='forward insert probes') parser.add_argument('--forward-record-parts', type=str, default="none", help='forward record parts') parser.add_argument('--forward-record-norm-items', type=str, default="none", help='forward record norm items') parser.add_argument('--forward-record-condition-items', type=str, default="none", help='forward record condition items') parser.add_argument('--backward-probe-position', type=str, default="none", help='backward insert probes') parser.add_argument('--backward-record-parts', type=str, default="none", help='backward record parts') parser.add_argument('--backward-record-norm-items', type=str, default="none", help='backward record norm items') parser.add_argument('--backward-record-condition-items', type=str, default="none", help='backward record condition items') parser.add_argument('--encoder-embedding-dropout', type=float, default=0.3, help='encoder embedding dropout') parser.add_argument('--decoder-embedding-dropout', type=float, default=0.3, help='decoder embedding dropout') parser.add_argument('--encoder-after-norm-dropout', type=float, default=0, help='encoder after-norm dropout') parser.add_argument('--decoder-after-norm-dropout', type=float, default=0, help='decoder after-norm dropout') parser.add_argument('--before-softmax-dropout', type=float, default=0, help='before softmax dropout') parser.add_argument('--encoder-dropout', type=float, default=0.3, help='encoder dropout') parser.add_argument('--decoder-dropout', type=float, default=0.3, help='decoder dropout') parser.add_argument('--encoder-attention', type=str, default='origin', help='encoder attention') parser.add_argument('--encoder-positional-embedding', type=int, default=1, help='encoder positional embedding') parser.add_argument('--affine-weight-decay', type=int, default=1, help='affine weight decay') 
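        # (added comment) The options in this block are the RegularizedBN-specific
        # switches: per-sublayer normalization selection (presumably consumed via the
        # NormSelect module; 'layer_1' / 'identity' are the defaults visible here),
        # WN/CWN weight normalization, and the forward/backward probes that dump
        # statistics to .mat files during training. Accepted values beyond the
        # defaults depend on what NormSelect implements.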
parser.add_argument('--setting-encoder-lr', type=int, default=0, help='encoder lr only') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if args.encoder_layers_to_keep: args.encoder_layers = len(args.encoder_layers_to_keep.split(",")) if args.decoder_layers_to_keep: args.decoder_layers = len(args.decoder_layers_to_keep.split(",")) if getattr(args, "max_source_positions", None) is None: args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if getattr(args, "max_target_positions", None) is None: args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if args.share_all_embeddings: #default: false if src_dict != tgt_dict: raise ValueError("--share-all-embeddings requires a joined dictionary") if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim" ) if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path ): raise ValueError( "--share-all-embeddings not compatible with --decoder-embed-path" ) encoder_embed_tokens = cls.build_embedding( args, src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: #print(cls);#<class 'fairseq.models.transformer.TransformerModel'> encoder_embed_tokens = cls.build_embedding( args, src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = cls.build_embedding( args, tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) #print(encoder_embed_tokens);#Embedding(10152, 512, padding_idx=1) encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return cls(args, encoder, decoder) @classmethod def build_embedding(cls, args, dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() #print(padding_idx);#1 emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder( args, tgt_dict, embed_tokens, no_encoder_attn=getattr(args, "no_cross_attention", False), ) # TorchScript doesn't support optional arguments with variable length (**kwargs). # Current workaround is to add union of all arguments in child classes. def forward( self, src_tokens, src_lengths, prev_output_tokens, return_all_hiddens: bool = True, features_only: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, ): """ Run the forward pass for an encoder-decoder model. Copied from the base class, but without ``**kwargs``, which are not supported by TorchScript. 
""" #print(src_tokens[:,0]);exit() #src_lengths: 一维tensor, 表示句子长度 shape: [192,] #src_tokens: 二维tensor, 表示多个句子 shape: [192,21],每个句子最后一个词为2,表示<eos> #prev_output_tokens: 二维tensor, if self.args.pad_right and 0: # Convert left-padding to right-padding src_tokens = utils.convert_padding_direction( src_tokens, padding_idx=1, left_to_right=True ) encoder_out = self.encoder( src_tokens, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens ) decoder_out = self.decoder( prev_output_tokens, encoder_out=encoder_out, features_only=features_only, alignment_layer=alignment_layer, alignment_heads=alignment_heads, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens, ) return decoder_out # Since get_normalized_probs is in the Fairseq Model which is not scriptable, # I rewrite the get_normalized_probs from Base Class to call the # helper function in the Base Class. @torch.jit.export def get_normalized_probs( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): """Get normalized probabilities (or log probs) from a net's output.""" return self.get_normalized_probs_scriptable(net_output, log_probs, sample) class TransformerEncoder(FairseqEncoder): """ Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`TransformerEncoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.register_buffer("version", torch.Tensor([3])) self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__) #self.dropout_module = FairseqDropout(args.encoder_embedding_dropout, module_name=self.__class__.__name__) self.dropout = args.dropout #self.dropout = args.encoder_embedding_dropout self.encoder_layerdrop = args.encoder_layerdrop embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens #self.embed_tokens.register_backward_hook(self.embed_backward_hook) #additional hook #self.embed_tokens.register_forward_hook(self.embed_forward_hook) #additional hook self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim) #取sqrt(embed_dim), 这样初始时模长与position_embed一样 self.embed_positions = ( PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings and args.encoder_positional_embedding else None ) if getattr(args, "layernorm_embedding", False): self.layernorm_embedding = LayerNorm(embed_dim) else: self.layernorm_embedding = None if not args.adaptive_input and args.quant_noise_pq > 0: self.quant_noise = apply_quant_noise_( nn.Linear(embed_dim, embed_dim, bias=False), args.quant_noise_pq, args.quant_noise_pq_block_size, ) else: self.quant_noise = None if self.encoder_layerdrop > 0.0: self.layers = LayerDropModuleList(p=self.encoder_layerdrop) else: self.layers = nn.ModuleList([]) #print(args.encoder_layers) #exit() self.layers.extend( [self.build_encoder_layer(args) for i in range(args.encoder_layers)] ) self.num_layers = len(self.layers) if args.encoder_normalize_before: self.layer_norm = NormSelect(args.encoder_pre_final_norm ,embed_dim)#需要额外在forward加mask #self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None self.embed_grad = [] def 
        def extract_id(s):
            cand = s.split('/')
            for c in cand:
                if(c.find('transformer')>=0):
                    return c
            print("error path!")
            exit()
            return 'error'
        self.prefix = extract_id(args.save_dir)
        self.src_tokens_list = []
        self.interval = 1101
        self.batchnum = 0
        self.step = 0
        self.save_src_tokens_file = 'statistics/{}/src_tokens'.format(self.prefix)
        self.lsuv = 1

    def build_encoder_layer(self, args):
        return TransformerEncoderLayer(args)

    def embed_backward_hook(self, m, i, o):
        self.batchnum += 1
        #print(m) #Embedding(10152, 512, padding_idx=1)
        #for grad in i:
        #    print(grad.shape) #torch.Size([10152, 512])
        #for grad in o:
        #    print(grad.shape) #torch.Size([192, 16, 512])
        for grad in i:
            self.embed_grad.append(torch.norm(grad).view(1))
        if self.batchnum % self.interval == 0:
            epoch = self.batchnum // self.interval
            self.embed_grad = torch.cat(self.embed_grad).cpu().numpy()
            d = {}
            file = "statistics/{}/embed_grad_{}.mat".format(self.prefix, epoch)
            d['embed_grad'] = self.embed_grad
            self.embed_grad = []
            io.savemat(file, d)

    def embed_forward_hook(self, m, i, o):
        #self.batchnum += 1
        #print(m) #Embedding(10152, 512, padding_idx=1)
        #for grad in i:
        #    print('i')
        #    print(grad.shape) #torch.Size([192,21])
        #print(len(o)) #192
        #for grad in o:
        #    print('o')
        #    print(grad.shape) # many tensors of torch.Size([21, 512])
        exit()  # leftover debug exit; this hook is not registered above
        for grad in i:
            self.embed_grad.append(torch.norm(grad).view(1))
        if self.batchnum % self.interval == 0:
            epoch = self.batchnum // self.interval
            self.embed_grad = torch.cat(self.embed_grad).cpu().numpy()
            d = {}
            file = "statistics/{}/embed_grad_{}.mat".format(self.prefix, epoch)
            d['embed_grad'] = self.embed_grad
            self.embed_grad = []
            from scipy import io
            io.savemat(file, d)

    def forward_embedding(self, src_tokens):
        # embed tokens and positions
        x = embed = self.embed_scale * self.embed_tokens(src_tokens)
        embed = self.embed_scale * self.embed_tokens(src_tokens)
        #print(embed[2][2]);exit()
        if self.embed_positions is not None:
            x = embed + self.embed_positions(src_tokens)
        #print(x[2][2]);exit()
        if self.layernorm_embedding is not None:
            print("layernorm")
            x = self.layernorm_embedding(x)
        x = self.dropout_module(x)  # is dropout right after the embedding a good idea? without it we might overfit
        #x = F.dropout(x, p=self.dropout, training=self.training)  # inplace=False gives the same result
        #print(x[2][2][-10:]);exit()
        if self.quant_noise is not None:
            x = self.quant_noise(x)
        return x, embed

    def save_src_tokens(self, src_tokens):
        self.src_tokens_list.append(src_tokens.cpu().numpy())
        if self.step % self.interval == 0:
            d = {}
            d['src_tokens'] = np.array(self.src_tokens_list)
            file = "{}_{}.mat".format(self.save_src_tokens_file, self.step // self.interval)
            self.src_tokens_list = []
            io.savemat(file, d)

    def forward(self, src_tokens, src_lengths=None, return_all_hiddens: bool = False):
        """
        Args:
            src_tokens (LongTensor): tokens in the source language of shape
                `(batch, src_len)`
            src_lengths (torch.LongTensor): lengths of each source sentence of
                shape `(batch)`
            return_all_hiddens (bool, optional): also return all of the
                intermediate hidden states (default: False).

        Returns:
            namedtuple:
                - **encoder_out** (Tensor): the last encoder layer's output of
                  shape `(src_len, batch, embed_dim)`
                - **encoder_padding_mask** (ByteTensor): the positions of
                  padding elements of shape `(batch, src_len)`
                - **encoder_embedding** (Tensor): the (scaled) embedding lookup
                  of shape `(batch, src_len, embed_dim)`
                - **encoder_states** (List[Tensor]): all intermediate
                  hidden states of shape `(src_len, batch, embed_dim)`.
                  Only populated if *return_all_hiddens* is True.
""" if self.training: self.step += 1 x, encoder_embedding = self.forward_embedding(src_tokens) #print(src_tokens[:,0]);exit() # B x T x C -> T x B x C x = x.transpose(0, 1) #print(x[2][2]);exit() # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) encoder_states = [] if return_all_hiddens else None #embed_out = x # encoder layers for layer in self.layers: #x = layer(x, encoder_padding_mask) x = layer(x, encoder_padding_mask, src_tokens=src_tokens) if return_all_hiddens: assert encoder_states is not None encoder_states.append(x) if self.layer_norm is not None: x = self.layer_norm(x,encoder_padding_mask) #print(encoder_padding_mask.sum()) 大部分为0,20% 平均20左右 ''' if encoder_padding_mask.sum()>10: print(encoder_padding_mask);exit() tensor([[False, False, False, ..., False, False, False], [False, False, False, ..., False, False, False], [False, False, False, ..., False, False, False], ..., [ True, False, False, ..., False, False, False], [ True, False, False, ..., False, False, False], [ True, False, False, ..., False, False, False]], device='cuda:0') ''' #print(x[2][2]);exit() return EncoderOut( encoder_out=x, # T x B x C encoder_padding_mask=encoder_padding_mask, # B x T encoder_embedding=encoder_embedding, # B x T x C encoder_states=encoder_states, # List[T x B x C] src_tokens=None, src_lengths=None, ) @torch.jit.export def reorder_encoder_out(self, encoder_out: EncoderOut, new_order): """ Reorder encoder output according to *new_order*. Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ """ Since encoder_padding_mask and encoder_embedding are both of type Optional[Tensor] in EncoderOut, they need to be copied as local variables for Torchscript Optional refinement """ encoder_padding_mask: Optional[Tensor] = encoder_out.encoder_padding_mask encoder_embedding: Optional[Tensor] = encoder_out.encoder_embedding new_encoder_out = ( encoder_out.encoder_out if encoder_out.encoder_out is None else encoder_out.encoder_out.index_select(1, new_order) ) new_encoder_padding_mask = ( encoder_padding_mask if encoder_padding_mask is None else encoder_padding_mask.index_select(0, new_order) ) new_encoder_embedding = ( encoder_embedding if encoder_embedding is None else encoder_embedding.index_select(0, new_order) ) src_tokens = encoder_out.src_tokens if src_tokens is not None: src_tokens = src_tokens.index_select(0, new_order) src_lengths = encoder_out.src_lengths if src_lengths is not None: src_lengths = src_lengths.index_select(0, new_order) encoder_states = encoder_out.encoder_states if encoder_states is not None: for idx, state in enumerate(encoder_states): encoder_states[idx] = state.index_select(1, new_order) return EncoderOut( encoder_out=new_encoder_out, # T x B x C encoder_padding_mask=new_encoder_padding_mask, # B x T encoder_embedding=new_encoder_embedding, # B x T x C encoder_states=encoder_states, # List[T x B x C] src_tokens=src_tokens, # B x T src_lengths=src_lengths, # B x 1 ) def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = "{}.embed_positions.weights".format(name) if weights_key in state_dict: 
print("deleting {0}".format(weights_key)) del state_dict[weights_key] state_dict[ "{}.embed_positions._float_tensor".format(name) ] = torch.FloatTensor(1) for i in range(self.num_layers): # update layer norms self.layers[i].upgrade_state_dict_named( state_dict, "{}.layers.{}".format(name, i) ) version_key = "{}.version".format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerDecoder(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): self.args = args super().__init__(dictionary) self.register_buffer("version", torch.Tensor([3])) self._future_mask = torch.empty(0) #self.dropout_module = FairseqDropout(args.decoder_embedding_dropout, module_name=self.__class__.__name__) self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__) self.before_softmax_dropout_module = nn.Dropout(p=args.before_softmax_dropout) self.decoder_layerdrop = args.decoder_layerdrop self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim self.embed_dim = embed_dim self.output_embed_dim = args.decoder_output_dim self.padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim) if not args.adaptive_input and args.quant_noise_pq > 0: self.quant_noise = apply_quant_noise_( nn.Linear(embed_dim, embed_dim, bias=False), args.quant_noise_pq, args.quant_noise_pq_block_size, ) else: self.quant_noise = None self.project_in_dim = ( Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None ) self.embed_positions = ( PositionalEmbedding( args.max_target_positions, embed_dim, self.padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None ) if getattr(args, "layernorm_embedding", False): self.layernorm_embedding = LayerNorm(embed_dim) else: self.layernorm_embedding = None self.cross_self_attention = getattr(args, "cross_self_attention", False) if self.decoder_layerdrop > 0.0: self.layers = LayerDropModuleList(p=self.decoder_layerdrop) else: self.layers = nn.ModuleList([]) self.layers.extend( [ self.build_decoder_layer(args, no_encoder_attn) for _ in range(args.decoder_layers) ] ) self.num_layers = len(self.layers) if args.decoder_normalize_before and not getattr( args, "no_decoder_final_norm", False ): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None self.project_out_dim = ( Linear(embed_dim, self.output_embed_dim, bias=False) if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None ) self.adaptive_softmax = None self.output_projection = None if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, 
type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif self.share_input_output_embed: self.output_projection = nn.Linear( self.embed_tokens.weight.shape[1], self.embed_tokens.weight.shape[0], bias=False, ) self.output_projection.weight = self.embed_tokens.weight else: self.output_projection = nn.Linear( self.output_embed_dim, len(dictionary), bias=False ) nn.init.normal_( self.output_projection.weight, mean=0, std=self.output_embed_dim ** -0.5 ) def build_decoder_layer(self, args, no_encoder_attn=False): return TransformerDecoderLayer(args, no_encoder_attn) def forward( self, prev_output_tokens, encoder_out: Optional[EncoderOut] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, features_only: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, src_lengths: Optional[Any] = None, return_all_hiddens: bool = False, ): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_out (optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` features_only (bool, optional): only return features without applying output layer (default: False). Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ #print("here!") #print('features_only',features_only) #False x, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, incremental_state=incremental_state, alignment_layer=alignment_layer, alignment_heads=alignment_heads, ) #print(x.shape) #torch.Size([320, 1, 512]) 一串输出都是这个size if not features_only: x = self.output_layer(x) return x, extra def extract_features( self, prev_output_tokens, encoder_out: Optional[EncoderOut] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, full_context_alignment: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, ): return self.extract_features_scriptable( prev_output_tokens, encoder_out, incremental_state, full_context_alignment, alignment_layer, alignment_heads, ) """ A scriptable subclass of this class has an extract_features method and calls super().extract_features, but super() is not supported in torchscript. Aa copy of this function is made to be used in the subclass instead. """ def extract_features_scriptable( self, prev_output_tokens, encoder_out: Optional[EncoderOut] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, full_context_alignment: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, ): """ Similar to *forward* but only return features. Includes several features from "Jointly Learning to Align and Translate with Transformer Models" (Garg et al., EMNLP 2019). Args: full_context_alignment (bool, optional): don't apply auto-regressive mask to self-attention (default: False). alignment_layer (int, optional): return mean alignment over heads at this layer (default: last layer). alignment_heads (int, optional): only average alignment over this many heads (default: all heads). 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ if alignment_layer is None: alignment_layer = self.num_layers - 1 # embed positions positions = ( self.embed_positions( prev_output_tokens, incremental_state=incremental_state ) if self.embed_positions is not None else None ) if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.quant_noise is not None: x = self.quant_noise(x) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions if self.layernorm_embedding is not None: x = self.layernorm_embedding(x) x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) self_attn_padding_mask: Optional[Tensor] = None if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any(): self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx) # decoder layers attn: Optional[Tensor] = None inner_states: List[Optional[Tensor]] = [x] for idx, layer in enumerate(self.layers): if incremental_state is None and not full_context_alignment: #true self_attn_mask = self.buffered_future_mask(x) else: self_attn_mask = None x, layer_attn, _ = layer( x, encoder_out.encoder_out if encoder_out is not None else None, encoder_out.encoder_padding_mask if encoder_out is not None else None, incremental_state, self_attn_mask=self_attn_mask, self_attn_padding_mask=self_attn_padding_mask, need_attn=bool((idx == alignment_layer)), need_head_weights=bool((idx == alignment_layer)), ) inner_states.append(x) if layer_attn is not None and idx == alignment_layer: attn = layer_attn.float().to(x) if attn is not None: if alignment_heads is not None: attn = attn[:alignment_heads] # average probabilities over heads attn = attn.mean(dim=0) if self.layer_norm is not None: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) x = self.before_softmax_dropout_module(x) return x, {"attn": [attn], "inner_states": inner_states} def output_layer(self, features): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary return self.output_projection(features) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions) def buffered_future_mask(self, tensor): dim = tensor.size(0) # self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround. 
if ( self._future_mask.size(0) == 0 or (not self._future_mask.device == tensor.device) or self._future_mask.size(0) < dim ): #true self._future_mask = torch.triu( utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1 ) self._future_mask = self._future_mask.to(tensor) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = "{}.embed_positions.weights".format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict[ "{}.embed_positions._float_tensor".format(name) ] = torch.FloatTensor(1) if f"{name}.output_projection.weight" not in state_dict: if self.share_input_output_embed: embed_out_key = f"{name}.embed_tokens.weight" else: embed_out_key = f"{name}.embed_out" if embed_out_key in state_dict: state_dict[f"{name}.output_projection.weight"] = state_dict[ embed_out_key ] if not self.share_input_output_embed: del state_dict[embed_out_key] for i in range(self.num_layers): # update layer norms layer_norm_map = { "0": "self_attn_layer_norm", "1": "encoder_attn_layer_norm", "2": "final_layer_norm", } for old, new in layer_norm_map.items(): for m in ("weight", "bias"): k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m) if k in state_dict: state_dict[ "{}.layers.{}.{}.{}".format(name, i, new, m) ] = state_dict[k] del state_dict[k] version_key = "{}.version".format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.0) return m @register_model_architecture("transformer", "transformer") def base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = 
getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.no_cross_attention = getattr(args, "no_cross_attention", False) args.cross_self_attention = getattr(args, "cross_self_attention", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.layernorm_embedding = getattr(args, "layernorm_embedding", False) args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) @register_model_architecture("transformer", "transformer_iwslt_de_en") def transformer_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.encoder_layers = getattr(args, "encoder_layers", 6) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) args.decoder_layers = getattr(args, "decoder_layers", 6) base_architecture(args) @register_model_architecture("transformer", "transformer_wmt_en_de") def transformer_wmt_en_de(args): base_architecture(args) # parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) @register_model_architecture("transformer", "transformer_vaswani_wmt_en_de_big") def transformer_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.3) base_architecture(args) @register_model_architecture("transformer", "transformer_vaswani_wmt_en_fr_big") def transformer_vaswani_wmt_en_fr_big(args): args.dropout = getattr(args, "dropout", 0.1) transformer_vaswani_wmt_en_de_big(args) @register_model_architecture("transformer", "transformer_wmt_en_de_big") def transformer_wmt_en_de_big(args): args.attention_dropout = getattr(args, "attention_dropout", 0.1) transformer_vaswani_wmt_en_de_big(args) # default parameters used in tensor2tensor implementation @register_model_architecture("transformer", "transformer_wmt_en_de_big_t2t") def transformer_wmt_en_de_big_t2t(args): args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.activation_dropout = getattr(args, "activation_dropout", 0.1) transformer_vaswani_wmt_en_de_big(args)
54,478
44.589121
159
py
RegularizedBN
RegularizedBN-main/fairseq/models/fconv_self_att.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import math import os import torch import torch.nn as nn import torch.nn.functional as F from fairseq import checkpoint_utils from fairseq.models import ( CompositeEncoder, FairseqDecoder, FairseqEncoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( FairseqDropout, DownsampledMultiHeadAttention, GradMultiply, LayerNorm, LearnedPositionalEmbedding, LinearizedConvolution, ) from fairseq.incremental_decoding_utils import with_incremental_state logger = logging.getLogger(__name__) @register_model('fconv_self_att') class FConvModelSelfAtt(FairseqEncoderDecoderModel): @classmethod def hub_models(cls): return { 'conv.stories.pretrained': { 'path': 'https://dl.fbaipublicfiles.com/fairseq/models/stories_checkpoint.tar.gz', 'checkpoint_file': 'pretrained_checkpoint.pt', 'tokenizer': 'nltk', }, 'conv.stories': { 'path': 'https://dl.fbaipublicfiles.com/fairseq/models/stories_checkpoint.tar.gz', 'checkpoint_file': 'fusion_checkpoint.pt', 'tokenizer': 'nltk', 'pretrained': 'True', 'pretrained_checkpoint': './pretrained_checkpoint.pt', }, # Test set containing dictionaries 'data.stories': 'https://dl.fbaipublicfiles.com/fairseq/data/stories_test.tar.bz2', } def __init__(self, encoder, decoder, pretrained_encoder=None): super().__init__(encoder, decoder) self.encoder.num_attention_layers = sum(layer is not None for layer in decoder.attention) self.pretrained_encoder = pretrained_encoder if self.pretrained_encoder is None: encoders = {'encoder': encoder} else: encoders = {'encoder': encoder, 'pretrained': self.pretrained_encoder} # for fusion model, CompositeEncoder contains both pretrained and training encoders # these are forwarded and then combined in the decoder self.encoder = CompositeEncoder(encoders) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-layers', type=str, metavar='EXPR', help='encoder layers [(dim, kernel_size), ...]') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-layers', type=str, metavar='EXPR', help='decoder layers [(dim, kernel_size), ...]') parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension') parser.add_argument('--decoder-attention', type=str, metavar='EXPR', help='decoder attention [True, ...]') parser.add_argument('--self-attention', type=str, metavar='EXPR', help='decoder self-attention layers, ex: [True] + [False]*5') parser.add_argument('--multihead-attention-nheads', type=int, help='Number of heads to use in attention') parser.add_argument('--multihead-self-attention-nheads', type=int, help='Number of heads to use in self-attention') parser.add_argument('--encoder-attention', type=str, metavar='EXPR', help='encoder attention [True, ...]') parser.add_argument('--encoder-attention-nheads', type=int, help='Number of heads to use in encoder attention') parser.add_argument('--project-input', type=str, metavar='EXPR', help='Use projections in self-attention [True, ...]') parser.add_argument('--gated-attention', type=str, 
metavar='EXPR', help='Use GLU layers in self-attention projections [True, ...]') parser.add_argument('--downsample', type=str, metavar='EXPR', help='Use downsampling in self-attention [True, ...]') parser.add_argument('--pretrained-checkpoint', metavar='DIR', help='path to load checkpoint from pretrained model') parser.add_argument('--pretrained', type=str, metavar='EXPR', help='use pretrained model when training [True, ...]') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" trained_encoder, trained_decoder = None, None pretrained = eval(args.pretrained) if pretrained: logger.info('loading pretrained model') if not os.path.exists(args.pretrained_checkpoint): new_pretrained_checkpoint = os.path.join(args.data, args.pretrained_checkpoint) if os.path.exists(new_pretrained_checkpoint): args.pretrained_checkpoint = new_pretrained_checkpoint trained_model = checkpoint_utils.load_model_ensemble( filenames=[args.pretrained_checkpoint], task=task, )[0][0] trained_decoder = list(trained_model.children())[1] trained_encoder = list(trained_model.children())[0] # freeze pretrained model for param in trained_decoder.parameters(): param.requires_grad = False for param in trained_encoder.parameters(): param.requires_grad = False encoder = FConvEncoder( task.source_dictionary, embed_dim=args.encoder_embed_dim, convolutions=eval(args.encoder_layers), dropout=args.dropout, max_positions=args.max_source_positions, attention=eval(args.encoder_attention), attention_nheads=args.encoder_attention_nheads, ) decoder = FConvDecoder( task.target_dictionary, embed_dim=args.decoder_embed_dim, convolutions=eval(args.decoder_layers), out_embed_dim=args.decoder_out_embed_dim, attention=eval(args.decoder_attention), dropout=args.dropout, max_positions=args.max_target_positions, selfattention=eval(args.self_attention), attention_nheads=args.multihead_attention_nheads, selfattention_nheads=args.multihead_self_attention_nheads, project_input=eval(args.project_input), gated_attention=eval(args.gated_attention), downsample=eval(args.downsample), pretrained=pretrained, trained_decoder=trained_decoder, ) model = FConvModelSelfAtt(encoder, decoder, trained_encoder) return model @property def pretrained(self): return self.pretrained_encoder is not None class FConvEncoder(FairseqEncoder): """Convolutional encoder""" def __init__( self, dictionary, embed_dim=512, max_positions=1024, convolutions=((512, 3),) * 20, dropout=0.1, attention=False, attention_nheads=1, ): super().__init__(dictionary) self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.num_attention_layers = None num_embeddings = len(dictionary) self.padding_idx = dictionary.pad() self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx) self.embed_positions = PositionalEmbedding( max_positions, embed_dim, self.padding_idx, ) def expand_bool_array(val): if isinstance(val, bool): # expand True into [True, True, ...] 
and do the same with False return [val] * len(convolutions) return val attention = expand_bool_array(attention) in_channels = convolutions[0][0] self.fc1 = Linear(embed_dim, in_channels, dropout=dropout) self.projections = nn.ModuleList() self.convolutions = nn.ModuleList() self.attention = nn.ModuleList() self.attproj = nn.ModuleList() for i, (out_channels, kernel_size) in enumerate(convolutions): self.projections.append( Linear(in_channels, out_channels) if in_channels != out_channels else None ) self.convolutions.append( ConvTBC(in_channels, out_channels * 2, kernel_size, dropout=dropout) ) self.attention.append( SelfAttention(out_channels, embed_dim, attention_nheads) if attention[i] else None ) in_channels = out_channels self.fc2 = Linear(in_channels, embed_dim) def forward(self, src_tokens, src_lengths): # embed tokens and positions x = self.embed_tokens(src_tokens) + self.embed_positions(src_tokens) x = self.dropout_module(x) input_embedding = x.transpose(0, 1) # project to size of convolution x = self.fc1(x) encoder_padding_mask = src_tokens.eq(self.padding_idx).t() # -> T x B if not encoder_padding_mask.any(): encoder_padding_mask = None # B x T x C -> T x B x C x = x.transpose(0, 1) # temporal convolutions for proj, conv, attention in zip(self.projections, self.convolutions, self.attention): residual = x if proj is None else proj(x) if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0) x = self.dropout_module(x) padding_l = (conv.kernel_size[0] - 1) // 2 padding_r = conv.kernel_size[0] // 2 x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r)) x = conv(x) x = F.glu(x, dim=2) if attention is not None: x = attention(x) x = (x + residual) * math.sqrt(0.5) # T x B x C -> B x T x C x = x.transpose(1, 0) # project back to size of embedding x = self.fc2(x) if encoder_padding_mask is not None: encoder_padding_mask = encoder_padding_mask.t() # -> B x T x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0) # scale gradients (this only affects backward, not forward) x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers)) # add output to input embedding for attention y = (x + input_embedding.transpose(0, 1)) * math.sqrt(0.5) return { 'encoder_out': (x, y), 'encoder_padding_mask': encoder_padding_mask, # B x T } def reorder_encoder_out(self, encoder_out, new_order): encoder_out['encoder_out'] = tuple( eo.index_select(0, new_order) for eo in encoder_out['encoder_out'] ) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(0, new_order) if 'pretrained' in encoder_out: encoder_out['pretrained']['encoder_out'] = tuple( eo.index_select(0, new_order) for eo in encoder_out['pretrained']['encoder_out'] ) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" return self.embed_positions.max_positions @with_incremental_state class FConvDecoder(FairseqDecoder): """Convolutional decoder""" def __init__( self, dictionary, embed_dim=512, out_embed_dim=256, max_positions=1024, convolutions=((512, 3),) * 8, attention=True, dropout=0.1, selfattention=False, attention_nheads=1, selfattention_nheads=1, project_input=False, gated_attention=False, downsample=False, pretrained=False, trained_decoder=None, ): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([2])) self.pretrained = pretrained self.pretrained_decoder = trained_decoder self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ 
) self.need_attn = True in_channels = convolutions[0][0] def expand_bool_array(val): if isinstance(val, bool): # expand True into [True, True, ...] and do the same with False return [val] * len(convolutions) return val attention = expand_bool_array(attention) selfattention = expand_bool_array(selfattention) if not isinstance(attention, list) or len(attention) != len(convolutions): raise ValueError('Attention is expected to be a list of booleans of ' 'length equal to the number of layers.') num_embeddings = len(dictionary) padding_idx = dictionary.pad() self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) self.embed_positions = PositionalEmbedding( max_positions, embed_dim, padding_idx, ) self.fc1 = Linear(embed_dim, in_channels, dropout=dropout) self.projections = nn.ModuleList() self.convolutions = nn.ModuleList() self.attention = nn.ModuleList() self.selfattention = nn.ModuleList() self.attproj = nn.ModuleList() for i, (out_channels, kernel_size) in enumerate(convolutions): self.projections.append( Linear(in_channels, out_channels) if in_channels != out_channels else None ) self.convolutions.append( LinearizedConv1d( in_channels, out_channels * 2, kernel_size, padding=(kernel_size - 1), dropout=dropout, ) ) self.attention.append( DownsampledMultiHeadAttention( out_channels, embed_dim, attention_nheads, project_input=project_input, gated=False, downsample=False, ) if attention[i] else None ) self.attproj.append( Linear(out_channels, embed_dim, dropout=dropout) if attention[i] else None ) self.selfattention.append( SelfAttention( out_channels, embed_dim, selfattention_nheads, project_input=project_input, gated=gated_attention, downsample=downsample, ) if selfattention[i] else None ) in_channels = out_channels self.fc2 = Linear(in_channels, out_embed_dim) self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout) # model fusion if self.pretrained: # independent gates are learned from the concatenated input self.gate1 = nn.Sequential(Linear(out_embed_dim*2, out_embed_dim), nn.Sigmoid()) self.gate2 = nn.Sequential(Linear(out_embed_dim*2, out_embed_dim), nn.Sigmoid()) # pretrained and trained models are joined self.joining = nn.Sequential( Linear(out_embed_dim*2, out_embed_dim*2), LayerNorm(out_embed_dim*2), nn.GLU(), Linear(out_embed_dim, out_embed_dim*2), LayerNorm(out_embed_dim*2), nn.GLU(), Linear(out_embed_dim, out_embed_dim), LayerNorm(out_embed_dim) ) # pretrained model contains an output layer that is nhid -> vocab size # but the models are combined in their hidden state # the hook stores the output of the pretrained model forward self.pretrained_outputs = {} def save_output(): def hook(a, b, output): self.pretrained_outputs["out"] = output return hook self.pretrained_decoder.fc2.register_forward_hook(save_output()) def forward(self, prev_output_tokens, encoder_out): trained_encoder_out = encoder_out['pretrained'] if self.pretrained else None encoder_out = encoder_out['encoder']['encoder_out'] encoder_a, encoder_b = self._split_encoder_out(encoder_out) # embed positions positions = self.embed_positions(prev_output_tokens) # embed tokens and positions x = self.embed_tokens(prev_output_tokens) + positions x = self.dropout_module(x) target_embedding = x.transpose(0, 1) # project to size of convolution x = self.fc1(x) # B x T x C -> T x B x C x = x.transpose(0, 1) # temporal convolutions avg_attn_scores = None for proj, conv, attention, selfattention, attproj in zip( self.projections, self.convolutions, self.attention, self.selfattention, self.attproj ): residual = 
x if proj is None else proj(x) x = self.dropout_module(x) x = conv(x) x = F.glu(x, dim=2) # attention if attention is not None: r = x x, attn_scores = attention(attproj(x) + target_embedding, encoder_a, encoder_b) x = x + r if not self.training and self.need_attn: if avg_attn_scores is None: avg_attn_scores = attn_scores else: avg_attn_scores.add_(attn_scores) if selfattention is not None: x = selfattention(x) x = (x + residual) * math.sqrt(0.5) # T x B x C -> B x T x C x = x.transpose(0, 1) # project back to size of vocabulary x = self.fc2(x) x = self.dropout_module(x) if not self.pretrained: x = self.fc3(x) # fusion gating if self.pretrained: trained_x, _ = self.pretrained_decoder.forward(prev_output_tokens, trained_encoder_out) y = torch.cat([x, self.pretrained_outputs["out"]], dim=-1) gate1 = self.gate1(y) gate2 = self.gate2(y) gated_x1 = gate1 * x gated_x2 = gate2 * self.pretrained_outputs["out"] fusion = torch.cat([gated_x1, gated_x2], dim=-1) fusion = self.joining(fusion) fusion_output = self.fc3(fusion) return fusion_output, avg_attn_scores else: return x, avg_attn_scores def max_positions(self): """Maximum output length supported by the decoder.""" return self.embed_positions.max_positions def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def _split_encoder_out(self, encoder_out): """Split and transpose encoder outputs.""" # transpose only once to speed up attention layers encoder_a, encoder_b = encoder_out encoder_a = encoder_a.transpose(0, 1).contiguous() encoder_b = encoder_b.transpose(0, 1).contiguous() result = (encoder_a, encoder_b) return result class SelfAttention(nn.Module): def __init__(self, out_channels, embed_dim, num_heads, project_input=False, gated=False, downsample=False): super().__init__() self.attention = DownsampledMultiHeadAttention( out_channels, embed_dim, num_heads, dropout=0, bias=True, project_input=project_input, gated=gated, downsample=downsample, ) self.in_proj_q = Linear(out_channels, embed_dim) self.in_proj_k = Linear(out_channels, embed_dim) self.in_proj_v = Linear(out_channels, embed_dim) self.ln = LayerNorm(out_channels) def forward(self, x): residual = x query = self.in_proj_q(x) key = self.in_proj_k(x) value = self.in_proj_v(x) x, _ = self.attention(query, key, value, mask_future_timesteps=True, use_scalar_bias=True) return self.ln(x + residual) def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) m.weight.data.normal_(0, 0.1) return m def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx): m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx) m.weight.data.normal_(0, 0.1) return m def Linear(in_features, out_features, dropout=0.): """Weight-normalized Linear layer (input: N x T x C)""" m = nn.Linear(in_features, out_features) m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features)) m.bias.data.zero_() return m def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0., **kwargs): """Weight-normalized Conv1d layer optimized for decoding""" m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs) std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) m.weight.data.normal_(mean=0, std=std) m.bias.data.zero_() return m def ConvTBC(in_channels, out_channels, kernel_size, dropout=0., **kwargs): """Weight-normalized Conv1d layer""" from fairseq.modules import ConvTBC m = ConvTBC(in_channels, out_channels, kernel_size, **kwargs) std = 
math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) m.weight.data.normal_(mean=0, std=std) m.bias.data.zero_() return m @register_model_architecture('fconv_self_att', 'fconv_self_att') def base_architecture(args): args.dropout = getattr(args, 'dropout', 0.1) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_layers = getattr(args, 'encoder_layers', '[(512, 3)] * 3') args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_layers = getattr(args, 'decoder_layers', '[(512, 3)] * 8') args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256) args.decoder_attention = getattr(args, 'decoder_attention', 'True') args.self_attention = getattr(args, 'self_attention', 'False') args.encoder_attention = getattr(args, 'encoder_attention', 'False') args.multihead_attention_nheads = getattr(args, 'multihead_attention_nheads', 1) args.multihead_self_attention_nheads = getattr(args, 'multihead_self_attention_nheads', 1) args.encoder_attention_nheads = getattr(args, 'encoder_attention_nheads', 1) args.project_input = getattr(args, 'project_input', 'False') args.gated_attention = getattr(args, 'gated_attention', 'False') args.downsample = getattr(args, 'downsample', 'False') args.pretrained_checkpoint = getattr(args, 'pretrained_checkpoint', '') args.pretrained = getattr(args, 'pretrained', 'False') @register_model_architecture('fconv_self_att', 'fconv_self_att_wp') def fconv_self_att_wp(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256) args.encoder_layers = getattr(args, 'encoder_layers', '[(128, 3)] * 2 + [(512,3)] * 1') args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 256) args.decoder_layers = getattr(args, 'decoder_layers', '[(512, 4)] * 4 + [(768, 4)] * 2 + [(1024, 4)] * 1') args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256) args.self_attention = getattr(args, 'self_attention', 'True') args.multihead_self_attention_nheads = getattr(args, 'multihead_self_attention_nheads', 4) args.project_input = getattr(args, 'project_input', 'True') args.gated_attention = getattr(args, 'gated_attention', 'True') args.downsample = getattr(args, 'downsample', 'True') base_architecture(args)
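
# Illustrative sketch (editorial addition; not from the original file). The conv factories above draw
# weights from a zero-mean normal whose std depends on the dropout rate and the fan-in; the factor 4
# follows the ConvS2S-style initialization, which compensates for the GLU activation.
def _example_conv_init_std(kernel_size=3, in_channels=512, dropout=0.1):
    """Std used by LinearizedConv1d / ConvTBC above for the given (hypothetical) settings."""
    return math.sqrt((4 * (1.0 - dropout)) / (kernel_size * in_channels))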
24,306
40.198305
111
py
RegularizedBN
RegularizedBN-main/fairseq/models/fairseq_decoder.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List, Optional, Tuple import torch.nn as nn from fairseq import utils from torch import Tensor class FairseqDecoder(nn.Module): """Base class for decoders.""" def __init__(self, dictionary): super().__init__() self.dictionary = dictionary self.onnx_trace = False def forward(self, prev_output_tokens, encoder_out=None, **kwargs): """ Args: prev_output_tokens (LongTensor): shifted output tokens of shape `(batch, tgt_len)`, for teacher forcing encoder_out (dict, optional): output from the encoder, used for encoder-side attention Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, **kwargs ) x = self.output_layer(x) return x, extra def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs): """ Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ raise NotImplementedError def output_layer(self, features, **kwargs): """ Project features to the default output size, e.g., vocabulary size. Args: features (Tensor): features returned by *extract_features*. """ raise NotImplementedError def get_normalized_probs( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): """Get normalized probabilities (or log probs) from a net's output.""" if hasattr(self, "adaptive_softmax") and self.adaptive_softmax is not None: if sample is not None: assert "target" in sample target = sample["target"] else: target = None out = self.adaptive_softmax.get_log_prob(net_output[0], target=target) return out.exp_() if not log_probs else out logits = net_output[0] if log_probs: return utils.log_softmax(logits, dim=-1, onnx_trace=self.onnx_trace) else: return utils.softmax(logits, dim=-1, onnx_trace=self.onnx_trace) def max_positions(self): """Maximum input length supported by the decoder.""" return 1e6 # an arbitrary large number def upgrade_state_dict(self, state_dict): """Upgrade a (possibly old) state dict for new versions of fairseq.""" return state_dict def prepare_for_onnx_export_(self): self.onnx_trace = True
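
# Illustrative sketch (editorial addition; not from the original file): a toy subclass showing the
# contract documented above -- forward() is extract_features() followed by output_layer().
# The class name and dimensions are hypothetical.
class _ToyDecoder(FairseqDecoder):
    def __init__(self, dictionary, embed_dim=8):
        super().__init__(dictionary)
        self.embed = nn.Embedding(len(dictionary), embed_dim, padding_idx=dictionary.pad())
        self.proj = nn.Linear(embed_dim, len(dictionary))

    def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs):
        # (batch, tgt_len) -> (batch, tgt_len, embed_dim), plus a dict for model-specific extras
        return self.embed(prev_output_tokens), {}

    def output_layer(self, features, **kwargs):
        # project features to vocabulary size: (batch, tgt_len, vocab)
        return self.proj(features)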
3,064
32.681319
83
py
RegularizedBN
RegularizedBN-main/fairseq/models/fairseq_model.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Base classes for various fairseq models. """ import logging from typing import Dict, List, Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.checkpoint_utils import prune_state_dict from fairseq.data import Dictionary from fairseq.models import FairseqDecoder, FairseqEncoder from torch import Tensor logger = logging.getLogger(__name__) class BaseFairseqModel(nn.Module): """Base class for fairseq models.""" def __init__(self): super().__init__() self._is_generation_fast = False @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" pass @classmethod def build_model(cls, args, task): """Build a new model instance.""" raise NotImplementedError("Model must implement the build_model method") def get_targets(self, sample, net_output): """Get targets from either the sample or the net's output.""" return sample["target"] def get_normalized_probs( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): """Get normalized probabilities (or log probs) from a net's output.""" return self.get_normalized_probs_scriptable(net_output, log_probs, sample) # TorchScript doesn't support super() method so that the scriptable Subclass # can't access the base class model in Torchscript. # Current workaround is to add a helper function with different name and # call the helper function from scriptable Subclass. def get_normalized_probs_scriptable( self, net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], log_probs: bool, sample: Optional[Dict[str, Tensor]] = None, ): """Scriptable helper function for get_normalized_probs in ~BaseFairseqModel""" if hasattr(self, "decoder"): return self.decoder.get_normalized_probs(net_output, log_probs, sample) elif torch.is_tensor(net_output): logits = net_output.float() if log_probs: return F.log_softmax(logits, dim=-1) else: return F.softmax(logits, dim=-1) raise NotImplementedError def extract_features(self, *args, **kwargs): """Similar to *forward* but only return features.""" return self(*args, **kwargs) def max_positions(self): """Maximum length supported by the model.""" return None def load_state_dict(self, state_dict, strict=True, args=None): """Copies parameters and buffers from *state_dict* into this module and its descendants. Overrides the method in :class:`nn.Module`. Compared with that method this additionally "upgrades" *state_dicts* from old checkpoints. """ self.upgrade_state_dict(state_dict) new_state_dict = prune_state_dict(state_dict, args) return super().load_state_dict(new_state_dict, strict) def upgrade_state_dict(self, state_dict): """Upgrade old state dicts to work with newer code.""" self.upgrade_state_dict_named(state_dict, "") def upgrade_state_dict_named(self, state_dict, name): """Upgrade old state dicts to work with newer code. Args: state_dict (dict): state dictionary to upgrade, in place name (str): the state dict key corresponding to the current module """ assert state_dict is not None def do_upgrade(m, prefix): if len(prefix) > 0: prefix += "." 
for n, c in m.named_children(): name = prefix + n if hasattr(c, "upgrade_state_dict_named"): c.upgrade_state_dict_named(state_dict, name) elif hasattr(c, "upgrade_state_dict"): c.upgrade_state_dict(state_dict) do_upgrade(c, name) do_upgrade(self, name) def set_num_updates(self, num_updates): """State from trainer to pass along to model at every update.""" def _apply(m): if hasattr(m, 'set_num_updates') and m != self: m.set_num_updates(num_updates) self.apply(_apply) def prepare_for_inference_(self, args): """Prepare model for inference.""" kwargs = {} kwargs['beamable_mm_beam_size'] = ( None if getattr(args, 'no_beamable_mm', False) else getattr(args, 'beam', 5) ) kwargs['need_attn'] = getattr(args, 'print_alignment', False) if hasattr(args, 'retain_dropout'): kwargs['retain_dropout'] = args.retain_dropout kwargs['retain_dropout_modules'] = getattr( args, 'retain_dropout_modules', None ) self.make_generation_fast_(**kwargs) def make_generation_fast_(self, **kwargs): """ Legacy entry point to optimize model for faster generation. Prefer prepare_for_inference_. """ if self._is_generation_fast: return # only apply once self._is_generation_fast = True # remove weight norm from all modules in the network def apply_remove_weight_norm(module): try: nn.utils.remove_weight_norm(module) except ValueError: # this module didn't have weight norm return self.apply(apply_remove_weight_norm) def apply_make_generation_fast_(module, prefix): if len(prefix) > 0: prefix += "." base_func = BaseFairseqModel.make_generation_fast_ for n, m in module.named_modules(): if ( m != self and hasattr(m, "make_generation_fast_") # don't call this implementation again, e.g., if # children modules also inherit from BaseFairseqModel and m.make_generation_fast_.__func__ is not base_func ): name = prefix + n m.make_generation_fast_(name=name, **kwargs) apply_make_generation_fast_(self, "") def train(mode=True): if mode: raise RuntimeError("cannot train after make_generation_fast") # this model should no longer be used for training self.eval() self.train = train def prepare_for_onnx_export_(self, **kwargs): """Make model exportable via ONNX trace.""" seen = set() def apply_prepare_for_onnx_export_(module): if ( module != self and hasattr(module, "prepare_for_onnx_export_") and module not in seen ): seen.add(module) module.prepare_for_onnx_export_(**kwargs) self.apply(apply_prepare_for_onnx_export_) def prepare_for_tpu_(self, **kwargs): """Optionally modify model for use on TPUs.""" seen = set() def apply_prepare_for_tpu_(module): if ( module != self and hasattr(module, "prepare_for_tpu_") and module not in seen ): seen.add(module) module.prepare_for_tpu_(**kwargs) self.apply(apply_prepare_for_tpu_) @classmethod def from_pretrained( cls, model_name_or_path, checkpoint_file="model.pt", data_name_or_path=".", **kwargs, ): """ Load a :class:`~fairseq.models.FairseqModel` from a pre-trained model file. Downloads and caches the pre-trained model file if needed. The base implementation returns a :class:`~fairseq.hub_utils.GeneratorHubInterface`, which can be used to generate translations or sample from language models. The underlying :class:`~fairseq.models.FairseqModel` can be accessed via the *generator.models* attribute. Other models may override this to implement custom hub interfaces. 
Args: model_name_or_path (str): either the name of a pre-trained model to load or a path/URL to a pre-trained model state dict checkpoint_file (str, optional): colon-separated list of checkpoint files in the model archive to ensemble (default: 'model.pt') data_name_or_path (str, optional): point args.data to the archive at the given path/URL. Can start with '.' or './' to reuse the model archive path. """ from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), **kwargs, ) logger.info(x["args"]) return hub_utils.GeneratorHubInterface(x["args"], x["task"], x["models"]) @classmethod def hub_models(cls): return {} class FairseqEncoderDecoderModel(BaseFairseqModel): """Base class for encoder-decoder models. Args: encoder (FairseqEncoder): the encoder decoder (FairseqDecoder): the decoder """ def __init__(self, encoder, decoder): super().__init__() self.encoder = encoder self.decoder = decoder assert isinstance(self.encoder, FairseqEncoder) assert isinstance(self.decoder, FairseqDecoder) def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): """ Run the forward pass for an encoder-decoder model. First feed a batch of source tokens through the encoder. Then, feed the encoder output and previous decoder outputs (i.e., teacher forcing) to the decoder to produce the next outputs:: encoder_out = self.encoder(src_tokens, src_lengths) return self.decoder(prev_output_tokens, encoder_out) Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) decoder_out = self.decoder( prev_output_tokens, encoder_out=encoder_out, **kwargs ) return decoder_out def forward_decoder(self, prev_output_tokens, **kwargs): return self.decoder(prev_output_tokens, **kwargs) def extract_features(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) features = self.decoder.extract_features( prev_output_tokens, encoder_out=encoder_out, **kwargs ) return features def output_layer(self, features, **kwargs): """Project features to the default output size (typically vocabulary size).""" return self.decoder.output_layer(features, **kwargs) def max_positions(self): """Maximum length supported by the model.""" return (self.encoder.max_positions(), self.decoder.max_positions()) def max_decoder_positions(self): """Maximum length supported by the decoder.""" return self.decoder.max_positions() class FairseqModel(FairseqEncoderDecoderModel): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) utils.deprecation_warning( "FairseqModel is deprecated, please use FairseqEncoderDecoderModel " "or BaseFairseqModel instead", stacklevel=4, ) class FairseqMultiModel(BaseFairseqModel): """Base class for combining multiple encoder-decoder models.""" def __init__(self, encoders, decoders): super().__init__() assert encoders.keys() == decoders.keys() self.keys = list(encoders.keys()) for key in self.keys: assert isinstance(encoders[key], FairseqEncoder) assert isinstance(decoders[key], FairseqDecoder) self.models = nn.ModuleDict( { key: FairseqEncoderDecoderModel(encoders[key], decoders[key]) for key in self.keys } ) @staticmethod def build_shared_embeddings( dicts: Dict[str, Dictionary], langs: List[str], embed_dim: int, build_embedding: callable, pretrained_embed_path: Optional[str] = None, ): """ Helper function to build shared embeddings for a set of languages after checking that all dicts corresponding to those languages are equivalent. Args: dicts: Dict of lang_id to its corresponding Dictionary langs: languages that we want to share embeddings for embed_dim: embedding dimension build_embedding: callable function to actually build the embedding pretrained_embed_path: Optional path to load pretrained embeddings """ shared_dict = dicts[langs[0]] if any(dicts[lang] != shared_dict for lang in langs): raise ValueError( "--share-*-embeddings requires a joined dictionary: " "--share-encoder-embeddings requires a joined source " "dictionary, --share-decoder-embeddings requires a joined " "target dictionary, and --share-all-embeddings requires a " "joint source + target dictionary." ) return build_embedding(shared_dict, embed_dim, pretrained_embed_path) def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): raise NotImplementedError def max_positions(self): """Maximum length supported by the model.""" return { key: ( self.models[key].encoder.max_positions(), self.models[key].decoder.max_positions(), ) for key in self.keys } def max_decoder_positions(self): """Maximum length supported by the decoder.""" return min(model.decoder.max_positions() for model in self.models.values()) @property def encoder(self): return self.models[self.keys[0]].encoder @property def decoder(self): return self.models[self.keys[0]].decoder def forward_decoder(self, prev_output_tokens, **kwargs): return self.decoder(prev_output_tokens, **kwargs) def load_state_dict(self, state_dict, strict=True, args=None): """Copies parameters and buffers from *state_dict* into this module and its descendants. Overrides the method in :class:`nn.Module`. Compared with that method this additionally "upgrades" *state_dicts* from old checkpoints. 
""" self.upgrade_state_dict(state_dict) new_state_dict = prune_state_dict(state_dict, args) return super().load_state_dict(new_state_dict, strict) class FairseqLanguageModel(BaseFairseqModel): """Base class for decoder-only models. Args: decoder (FairseqDecoder): the decoder """ def __init__(self, decoder): super().__init__() self.decoder = decoder assert isinstance(self.decoder, FairseqDecoder) def forward(self, src_tokens, **kwargs): """ Run the forward pass for a decoder-only model. Feeds a batch of tokens through the decoder to predict the next tokens. Args: src_tokens (LongTensor): tokens on which to condition the decoder, of shape `(batch, tgt_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` Returns: tuple: - the decoder's output of shape `(batch, seq_len, vocab)` - a dictionary with any model-specific outputs """ return self.decoder(src_tokens, **kwargs) def forward_decoder(self, prev_output_tokens, **kwargs): return self.decoder(prev_output_tokens, **kwargs) def extract_features(self, src_tokens, **kwargs): """ Similar to *forward* but only return features. Returns: tuple: - the decoder's features of shape `(batch, seq_len, embed_dim)` - a dictionary with any model-specific outputs """ return self.decoder.extract_features(src_tokens, **kwargs) def output_layer(self, features, **kwargs): """Project features to the default output size (typically vocabulary size).""" return self.decoder.output_layer(features, **kwargs) def max_positions(self): """Maximum length supported by the model.""" return self.decoder.max_positions() def max_decoder_positions(self): """Maximum length supported by the decoder.""" return self.decoder.max_positions() @property def supported_targets(self): return {"future"} class FairseqEncoderModel(BaseFairseqModel): """Base class for encoder-only models. Args: encoder (FairseqEncoder): the encoder """ def __init__(self, encoder): super().__init__() self.encoder = encoder assert isinstance(self.encoder, FairseqEncoder) def forward(self, src_tokens, src_lengths, **kwargs): """ Run the forward pass for a encoder-only model. Feeds a batch of tokens through the encoder to generate features. Args: src_tokens (LongTensor): input tokens of shape `(batch, src_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` Returns: the encoder's output, typically of shape `(batch, src_len, features)` """ return self.encoder(src_tokens, src_lengths, **kwargs) def get_normalized_probs(self, net_output, log_probs, sample=None): """Get normalized probabilities (or log probs) from a net's output.""" encoder_out = net_output["encoder_out"] if torch.is_tensor(encoder_out): logits = encoder_out.float() if log_probs: return F.log_softmax(logits, dim=-1) else: return F.softmax(logits, dim=-1) raise NotImplementedError def max_positions(self): """Maximum length supported by the model.""" return self.encoder.max_positions()
19,311
34.696858
86
py
RegularizedBN
RegularizedBN-main/fairseq/models/fairseq_incremental_decoder.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from typing import Dict, Optional from torch import Tensor from fairseq.models import FairseqDecoder from fairseq.incremental_decoding_utils import with_incremental_state logger = logging.getLogger(__name__) @with_incremental_state class FairseqIncrementalDecoder(FairseqDecoder): """Base class for incremental decoders. Incremental decoding is a special mode at inference time where the Model only receives a single timestep of input corresponding to the previous output token (for teacher forcing) and must produce the next output *incrementally*. Thus the model must cache any long-term state that is needed about the sequence, e.g., hidden states, convolutional states, etc. Compared to the standard :class:`FairseqDecoder` interface, the incremental decoder interface allows :func:`forward` functions to take an extra keyword argument (*incremental_state*) that can be used to cache state across time-steps. The :class:`FairseqIncrementalDecoder` interface also defines the :func:`reorder_incremental_state` method, which is used during beam search to select and reorder the incremental state based on the selection of beams. To learn more about how incremental decoding works, refer to `this blog <http://www.telesens.co/2019/04/21/understanding-incremental-decoding-in-fairseq/>`_. """ def __init__(self, dictionary): super().__init__(dictionary) def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs): """ Args: prev_output_tokens (LongTensor): shifted output tokens of shape `(batch, tgt_len)`, for teacher forcing encoder_out (dict, optional): output from the encoder, used for encoder-side attention incremental_state (dict, optional): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ raise NotImplementedError def extract_features(self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs): """ Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ raise NotImplementedError def reorder_incremental_state( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor, ): """Reorder incremental state. This will be called when the order of the input has changed from the previous time step. A typical use case is beam search, where the input order changes between time steps based on the selection of beams. """ pass def reorder_incremental_state_scripting( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor, ): """Main entry point for reordering the incremental state. Due to limitations in TorchScript, we call this function in :class:`fairseq.sequence_generator.SequenceGenerator` instead of calling :func:`reorder_incremental_state` directly. 
""" for module in self.modules(): if hasattr(module, 'reorder_incremental_state'): result = module.reorder_incremental_state(incremental_state, new_order) if result is not None: incremental_state = result def set_beam_size(self, beam_size): """Sets the beam size in the decoder and all children.""" if getattr(self, '_beam_size', -1) != beam_size: seen = set() def apply_set_beam_size(module): if module != self and hasattr(module, 'set_beam_size') \ and module not in seen: seen.add(module) module.set_beam_size(beam_size) self.apply(apply_set_beam_size) self._beam_size = beam_size
4,387
37.831858
103
py
RegularizedBN
RegularizedBN-main/fairseq/models/distributed_fairseq_model.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import inspect import torch.nn as nn from fairseq.legacy_distributed_data_parallel import LegacyDistributedDataParallel from fairseq.models import BaseFairseqModel _GOSSIP_DISABLED = False try: import gossip except ImportError: _GOSSIP_DISABLED = True def DistributedFairseqModel(args, model, process_group=None): """ Wrap a *model* to support distributed data parallel training. This is similar to the built-in DistributedDataParallel, but allows additional configuration of the DistributedDataParallel class to use, and also provides easier access to the wrapped model by forwarding requests for missing attributes to the wrapped model. Args: args (argparse.Namespace): fairseq args model (BaseFairseqModel): model to wrap """ # determine which DDP class to extend assert isinstance(model, nn.Module) if args.distributed_wrapper == 'DDP' and args.ddp_backend == 'c10d': ddp_class = nn.parallel.DistributedDataParallel init_kwargs = dict( module=model, device_ids=[args.device_id], output_device=args.device_id, broadcast_buffers=args.broadcast_buffers, bucket_cap_mb=args.bucket_cap_mb, process_group=process_group, ) # Maintain backward compatibility if 'check_reduction' in inspect.getargspec(ddp_class)[0]: init_kwargs['check_reduction'] = True if 'find_unused_parameters' in inspect.getargspec(ddp_class)[0]: init_kwargs['find_unused_parameters'] = args.find_unused_parameters elif args.distributed_wrapper == 'DDP' and args.ddp_backend == 'no_c10d': ddp_class = LegacyDistributedDataParallel init_kwargs = dict( module=model, world_size=args.distributed_world_size, buffer_size=2**28, process_group=process_group, ) elif args.distributed_wrapper == 'SlowMo': if _GOSSIP_DISABLED: raise ImportError( 'Cannot find gossip library. Please install from: ' 'github.com/facebookresearch/stochastic_gradient_push' ) ddp_class = gossip.GossipDataParallel # The values of slowmo_momentum below were obtained by tuning on the # En-De 16 dataset by training the transformer_wmt_en_de_large model if args.slowmo_momentum is None: if args.distributed_world_size <= 16: args.slowmo_momentum = 0.0 elif args.distributed_world_size <= 32: args.slowmo_momentum = 0.2 elif args.distributed_world_size <= 64: args.slowmo_momentum = 0.5 else: args.slowmo_momentum = 0.6 init_kwargs = dict( module=model, device_ids=[args.device_id], output_device=args.device_id, broadcast_buffers=args.broadcast_buffers, nprocs_per_node=args.nprocs_per_node, slowmo_momentum=args.slowmo_momentum, localsgd=(args.slowmo_algorithm == 'LocalSGD'), localsgd_frequency=args.localsgd_frequency ) else: raise ValueError('Unknown --ddp-backend: ' + args.ddp_backend) class _DistributedFairseqModel(ddp_class): """Extend DistributedDataParallel to check for missing attributes in the wrapped module.""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def __getattr__(self, name): wrapped_module = super().__getattr__('module') if hasattr(wrapped_module, name): return getattr(wrapped_module, name) return super().__getattr__(name) return _DistributedFairseqModel(**init_kwargs)
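
# Illustrative sketch (editorial addition; not from the original file): typical call site, wrapping
# the model once per process after distributed initialization. `args` and `model` are assumed to
# come from the usual fairseq trainer setup.
def _example_wrap_model(args, model, process_group=None):
    wrapped = DistributedFairseqModel(args, model, process_group=process_group)
    # attribute access falls through to the wrapped module, e.g.:
    max_pos = wrapped.max_positions()
    return wrapped, max_pos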
3,982
36.575472
82
py
RegularizedBN
RegularizedBN-main/fairseq/models/LSUV.py
from __future__ import print_function import numpy as np import torch import torch.nn.init import torch.nn as nn gg = {} gg['hook_position'] = 0 gg['total_fc_conv_layers'] = 0 gg['done_counter'] = -1 gg['hook'] = None gg['act_dict'] = {} gg['counter_to_apply_correction'] = 0 gg['correction_needed'] = False gg['current_coef'] = 1.0 # Orthonorm init code is taked from Lasagne # https://github.com/Lasagne/Lasagne/blob/master/lasagne/init.py def svd_orthonormal(w): shape = w.shape if len(shape) < 2: raise RuntimeError("Only shapes of length 2 or more are supported.") flat_shape = (shape[0], np.prod(shape[1:])) a = np.random.normal(0.0, 1.0, flat_shape)#w; u, _, v = np.linalg.svd(a, full_matrices=False) q = u if u.shape == flat_shape else v print (shape, flat_shape) q = q.reshape(shape) return q.astype(np.float32) def store_activations(self, input, output): gg['act_dict'] = output.data.cpu().numpy(); #print('act shape = ', gg['act_dict'].shape) return def add_current_hook(m): if gg['hook'] is not None: return if (isinstance(m, nn.Conv2d)) or (isinstance(m, nn.Linear)): #print 'trying to hook to', m, gg['hook_position'], gg['done_counter'] if gg['hook_position'] > gg['done_counter']: gg['hook'] = m.register_forward_hook(store_activations) #print ' hooking layer = ', gg['hook_position'], m else: #print m, 'already done, skipping' gg['hook_position'] += 1 return def count_conv_fc_layers(m): if (isinstance(m, nn.Conv2d)) or (isinstance(m, nn.Linear)): gg['total_fc_conv_layers'] +=1 return def remove_hooks(hooks): for h in hooks: h.remove() return def orthogonal_weights_init(m): if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): if hasattr(m, 'weight'): w_ortho = svd_orthonormal(m.weight.data.cpu().numpy()) m.weight.data = torch.from_numpy(w_ortho) try: nn.init.constant(m.bias, 0) except: pass else: #nn.init.orthogonal(m.weight) w_ortho = svd_orthonormal(m.weight.data.cpu().numpy()) #print w_ortho #m.weight.data.copy_(torch.from_numpy(w_ortho)) m.weight.data = torch.from_numpy(w_ortho) try: nn.init.constant(m.bias, 0) except: pass return def apply_weights_correction(m): if gg['hook'] is None: return if not gg['correction_needed']: return if (isinstance(m, nn.Conv2d)) or (isinstance(m, nn.Linear)): if gg['counter_to_apply_correction'] < gg['hook_position']: gg['counter_to_apply_correction'] += 1 else: if hasattr(m, 'weight'): m.weight.data *= float(gg['current_coef']) gg['correction_needed'] = False if hasattr(m, 'bias'): if m.bias is not None: m.bias.data += float(gg['current_bias']) return return def LSUVinit(model,data, needed_std = 1.0, std_tol = 0.1, max_attempts = 10, do_orthonorm = True,needed_mean = 0., cuda = False, verbose = True): #cuda = data.is_cuda gg['total_fc_conv_layers']=0 gg['done_counter']= 0 gg['hook_position'] = 0 gg['hook'] = None model.eval() #if cuda: # model = model.cuda() # data = data.cuda() #else: # model = model.cpu() # data = data.cpu() if verbose: print( 'Starting LSUV') model.apply(count_conv_fc_layers) if verbose: print ('Total layers to process:', gg['total_fc_conv_layers']) with torch.no_grad(): if do_orthonorm: model.apply(orthogonal_weights_init) if verbose: print ('Orthonorm done') #if cuda: # model = model.cuda() for layer_idx in range(gg['total_fc_conv_layers']): if verbose: print (layer_idx) model.apply(add_current_hook) out = model(data) current_std = gg['act_dict'].std() current_mean = gg['act_dict'].mean() if verbose: print ('std at layer ',layer_idx, ' = ', current_std) #print gg['act_dict'].shape attempts = 0 while (np.abs(current_std - needed_std) 
> std_tol): gg['current_coef'] = needed_std / (current_std + 1e-8); gg['current_bias'] = needed_mean - current_mean * gg['current_coef']; gg['correction_needed'] = True model.apply(apply_weights_correction) #if cuda: # model = model.cuda() out = model(data) current_std = gg['act_dict'].std() current_mean = gg['act_dict'].mean() if verbose: print ('std at layer ',layer_idx, ' = ', current_std, 'mean = ', current_mean) attempts+=1 if attempts > max_attempts: if verbose: print ('Cannot converge in ', max_attempts, 'iterations') break if gg['hook'] is not None: gg['hook'].remove() gg['done_counter']+=1 gg['counter_to_apply_correction'] = 0 gg['hook_position'] = 0 gg['hook'] = None if verbose: print ('finish at layer',layer_idx ) if verbose: print ('LSUV init done!') #if not cuda: # model = model.cpu() model.train() return model
5,573
34.730769
145
py
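A brief usage sketch for the LSUVinit routine defined in the file above may help, since the function is only defined there and never demonstrated. The toy model, the batch shape, and the keyword values below are illustrative assumptions, not anything taken from the repository.

import torch
import torch.nn as nn

# Hypothetical toy network; LSUVinit only visits Conv2d and Linear modules.
model = nn.Sequential(
    nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(),
    nn.Conv2d(16, 32, 3, padding=1), nn.ReLU(),
    nn.Flatten(),
    nn.Linear(32 * 8 * 8, 10),
)
batch = torch.randn(4, 3, 8, 8)  # one representative mini-batch

# Orthonormalize the weights, then rescale each Conv2d/Linear layer until its
# output standard deviation on this batch is within std_tol of needed_std.
model = LSUVinit(model, batch, needed_std=1.0, std_tol=0.1,
                 max_attempts=10, do_orthonorm=True, verbose=False)

The model comes back in train mode, so it can be handed straight to the usual optimizer loop.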
RegularizedBN
RegularizedBN-main/fairseq/models/wav2vec/wav2vec.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import math import sys import torch import torch.nn as nn import torch.nn.functional as F from fairseq.models import BaseFairseqModel, register_model, register_model_architecture from fairseq.modules import ( Fp32GroupNorm, Fp32LayerNorm, GumbelVectorQuantizer, KmeansVectorQuantizer, TransposeLast, ) from fairseq.utils import buffered_arange logger = logging.getLogger(__name__) @register_model("wav2vec") class Wav2VecModel(BaseFairseqModel): @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument( "--prediction-steps", type=int, metavar="N", help="number of steps ahead to predict", ) parser.add_argument( "--sample-distance", type=int, metavar="N", help="sample distance from target. does not work properly with cross-sampling", ) parser.add_argument( "--cross-sample-negatives", type=int, metavar="N", help="num of cross sampled negatives", ) parser.add_argument( "--num-negatives", type=int, metavar="N", help="number of negative examples" ) parser.add_argument( "--conv-feature-layers", type=str, metavar="EXPR", help="convolutional feature extraction layers [(dim, kernel_size, stride), ...]", ) parser.add_argument( "--conv-aggregator-layers", type=str, metavar="EXPR", help="convolutional feature extraction layers [(dim, kernel_size, stride), ...]", ) parser.add_argument( "--dropout", type=float, metavar="D", help="dropout to apply within the model", ) parser.add_argument( "--dropout-features", type=float, metavar="D", help="dropout to apply to the features", ) parser.add_argument( "--dropout-agg", type=float, metavar="D", help="dropout to apply after aggregation step", ) parser.add_argument( "--encoder", type=str, choices=["cnn"], help="type of encoder to use" ) parser.add_argument( "--aggregator", type=str, choices=["cnn", "gru"], help="type of aggregator to use", ) parser.add_argument( "--gru-dim", type=int, metavar="N", help="GRU dimensionality" ) parser.add_argument( "--no-conv-bias", action="store_true", help="if set, does not learn bias for conv layers", ) parser.add_argument( "--agg-zero-pad", action="store_true", help="if set, zero pads in aggregator instead of repl pad", ) parser.add_argument( "--skip-connections-feat", action="store_true", help="if set, adds skip connections to the feature extractor", ) parser.add_argument( "--skip-connections-agg", action="store_true", help="if set, adds skip connections to the aggregator", ) parser.add_argument( "--residual-scale", type=float, metavar="D", help="scales residual by sqrt(value)", ) parser.add_argument( "--log-compression", action="store_true", help="if set, adds a log compression to feature extractor", ) parser.add_argument( "--balanced-classes", action="store_true", help="if set, loss is scaled to balance for number of negatives", ) parser.add_argument( "--project-features", choices=["none", "same", "new"], help="if not none, features are projected using the (same or new) aggregator", ) parser.add_argument( "--non-affine-group-norm", action="store_true", help="if set, group norm is not affine", ) parser.add_argument( "--offset", help="if set, introduces an offset from target to predictions. 
" 'if set to "auto", it is computed automatically from the receptive field', ) parser.add_argument( "--activation", type=str, choices=["relu", "gelu"], help="which activation function to use", ) parser.add_argument( "--vq-type", type=str, choices=["none", "gumbel", "kmeans"], help="which type of quantizer to use", ) parser.add_argument( "--vq-vars", type=int, metavar="N", help="if set, project to this many vector quantized variables per group", ) parser.add_argument( "--vq-groups", type=int, metavar="N", help="number of groups of latent variables", ) parser.add_argument( "--vq-dim", type=int, metavar="N", help="uses this dimensionality for quantized vectors", ) parser.add_argument( "--vq-depth", type=int, metavar="N", help="number of layers for vq weight projection", ) parser.add_argument( "--combine-groups", action="store_true", help="if set, variables are shared among groups", ) parser.add_argument( "--vq-temp", type=str, metavar="TEMP", help="temperature for latent variable sampling with gumbel softmax. should be a tuple of 3 values (start, end, decay)", ) parser.add_argument( "--vq-gamma", type=float, metavar="D", help="gamma parameter for kmeans style vector quantization", ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_wav2vec_architecture(args) model = Wav2VecModel(args) logger.info(model) return model def __init__(self, args): super().__init__() self.prediction_steps = args.prediction_steps offset = args.offset if args.activation == "relu": activation = nn.ReLU() elif args.activation == "gelu": activation = nn.GELU() else: raise Exception("unknown activation " + args.activation) if args.encoder == "cnn": feature_enc_layers = eval(args.conv_feature_layers) self.feature_extractor = ConvFeatureExtractionModel( conv_layers=feature_enc_layers, dropout=0.0, log_compression=args.log_compression, skip_connections=args.skip_connections_feat, residual_scale=args.residual_scale, non_affine_group_norm=args.non_affine_group_norm, activation=activation, ) embed = feature_enc_layers[-1][0] else: raise Exception("unknown encoder type " + args.encoder) self.vector_quantizer = None if args.vq_type == "gumbel": self.vector_quantizer = GumbelVectorQuantizer( dim=embed, num_vars=args.vq_vars, temp=eval(args.vq_temp), groups=args.vq_groups, combine_groups=args.combine_groups, vq_dim=args.vq_dim if args.vq_dim > 0 else embed, time_first=False, activation=activation, weight_proj_depth=args.vq_depth, weight_proj_factor=2, ) elif args.vq_type == "kmeans": self.vector_quantizer = KmeansVectorQuantizer( dim=embed, num_vars=args.vq_vars, groups=args.vq_groups, combine_groups=args.combine_groups, vq_dim=args.vq_dim if args.vq_dim > 0 else embed, time_first=False, gamma=args.vq_gamma, ) else: assert ( args.vq_type == "none" or args.vq_type is None ), "Unknown quantizer type" if args.offset == "auto": assert args.encoder == "cnn" jin = 0 rin = 0 for _, k, stride in feature_enc_layers: if rin == 0: rin = k rin = rin + (k - 1) * jin if jin == 0: jin = stride else: jin *= stride offset = math.ceil(rin / jin) offset = int(offset) def make_aggregator(): if args.aggregator == "cnn": agg_layers = eval(args.conv_aggregator_layers) agg_dim = agg_layers[-1][0] feature_aggregator = ConvAggegator( conv_layers=agg_layers, embed=embed, dropout=args.dropout, skip_connections=args.skip_connections_agg, residual_scale=args.residual_scale, non_affine_group_norm=args.non_affine_group_norm, conv_bias=not args.no_conv_bias, 
zero_pad=args.agg_zero_pad, activation=activation, ) elif args.aggregator == "gru": agg_dim = args.gru_dim feature_aggregator = nn.Sequential( TransposeLast(), nn.GRU( input_size=embed, hidden_size=agg_dim, num_layers=1, dropout=args.dropout, ), TransposeLast(deconstruct_idx=0), ) else: raise Exception("unknown aggregator type " + args.aggregator) return feature_aggregator, agg_dim self.feature_aggregator, agg_dim = make_aggregator() self.wav2vec_predictions = Wav2VecPredictionsModel( in_dim=agg_dim, out_dim=embed, prediction_steps=args.prediction_steps, n_negatives=args.num_negatives, cross_sample_negatives=args.cross_sample_negatives, sample_distance=args.sample_distance, dropout=args.dropout, offset=offset, balanced_classes=args.balanced_classes, infonce=args.infonce, ) self.dropout_feats = nn.Dropout(p=args.dropout_features) self.dropout_agg = nn.Dropout(p=args.dropout_agg) if args.project_features == "none": self.project_features = None elif args.project_features == "same": self.project_features = self.feature_aggregator elif args.project_features == "new": self.project_features, _ = make_aggregator() def forward(self, source): result = {} features = self.feature_extractor(source) if self.vector_quantizer: q_res = self.vector_quantizer(features) features = q_res["x"] for k in q_res.keys(): if k != "x": result[k] = q_res[k] x = self.dropout_feats(features) x = self.feature_aggregator(x) x = self.dropout_agg(x) if self.project_features is not None: features = self.project_features(features) x, targets = self.wav2vec_predictions(x, features) result["cpc_logits"] = x result["cpc_targets"] = targets return result def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) def max_positions(self): """Maximum length supported by the model.""" return sys.maxsize def get_logits(self, net_output): logits = net_output["cpc_logits"] return logits def get_targets(self, sample, net_output): t = net_output["cpc_targets"] if isinstance(t, tuple): t = t[0] return t.contiguous() def get_target_weights(self, targets, net_output): targets = net_output["cpc_targets"] if isinstance(targets, tuple) and targets[-1] is not None: return targets[-1] return None def get_extra_losses(self, net_output): loss = None if "prob_perplexity" in net_output: loss = net_output["num_vars"] - net_output["prob_perplexity"] elif "kmeans_loss" in net_output: loss = net_output["kmeans_loss"] return loss def norm_block(is_layer_norm, dim, affine=True): if is_layer_norm: mod = nn.Sequential( TransposeLast(), Fp32LayerNorm(dim, elementwise_affine=affine), TransposeLast(), ) else: mod = Fp32GroupNorm(1, dim, affine=affine) return mod class ConvFeatureExtractionModel(nn.Module): def __init__( self, conv_layers, dropout, log_compression, skip_connections, residual_scale, non_affine_group_norm, activation, ): super().__init__() def block(n_in, n_out, k, stride): return nn.Sequential( nn.Conv1d(n_in, n_out, k, stride=stride, bias=False), nn.Dropout(p=dropout), norm_block( is_layer_norm=False, dim=n_out, affine=not non_affine_group_norm ), activation, ) in_d = 1 self.conv_layers = nn.ModuleList() for dim, k, stride in conv_layers: self.conv_layers.append(block(in_d, dim, k, stride)) in_d = dim self.log_compression = log_compression self.skip_connections = skip_connections self.residual_scale = math.sqrt(residual_scale) def forward(self, x): # BxT -> BxCxT x = x.unsqueeze(1) for conv in self.conv_layers: residual = x x = conv(x) if self.skip_connections and x.size(1) == residual.size(1): tsz = 
x.size(2) r_tsz = residual.size(2) residual = residual[..., :: r_tsz // tsz][..., :tsz] x = (x + residual) * self.residual_scale if self.log_compression: x = x.abs() x = x + 1 x = x.log() return x class ZeroPad1d(nn.Module): def __init__(self, pad_left, pad_right): super().__init__() self.pad_left = pad_left self.pad_right = pad_right def forward(self, x): return F.pad(x, (self.pad_left, self.pad_right)) class ConvAggegator(nn.Module): def __init__( self, conv_layers, embed, dropout, skip_connections, residual_scale, non_affine_group_norm, conv_bias, zero_pad, activation, ): super().__init__() def block(n_in, n_out, k, stride): # padding dims only really make sense for stride = 1 ka = k // 2 kb = ka - 1 if k % 2 == 0 else ka pad = ( ZeroPad1d(ka + kb, 0) if zero_pad else nn.ReplicationPad1d((ka + kb, 0)) ) return nn.Sequential( pad, nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias), nn.Dropout(p=dropout), norm_block(False, n_out, affine=not non_affine_group_norm), activation, ) in_d = embed self.conv_layers = nn.ModuleList() self.residual_proj = nn.ModuleList() for dim, k, stride in conv_layers: if in_d != dim and skip_connections: self.residual_proj.append(nn.Conv1d(in_d, dim, 1, bias=False)) else: self.residual_proj.append(None) self.conv_layers.append(block(in_d, dim, k, stride)) in_d = dim self.conv_layers = nn.Sequential(*self.conv_layers) self.skip_connections = skip_connections self.residual_scale = math.sqrt(residual_scale) def forward(self, x): for rproj, conv in zip(self.residual_proj, self.conv_layers): residual = x x = conv(x) if self.skip_connections: if rproj is not None: residual = rproj(residual) x = (x + residual) * self.residual_scale return x class Wav2VecPredictionsModel(nn.Module): def __init__( self, in_dim, out_dim, prediction_steps, n_negatives, cross_sample_negatives, sample_distance, dropout, offset, balanced_classes, infonce, ): super().__init__() self.n_negatives = n_negatives self.cross_sample_negatives = cross_sample_negatives self.sample_distance = sample_distance self.project_to_steps = nn.ConvTranspose2d( in_dim, out_dim, (1, prediction_steps) ) self.dropout = nn.Dropout(p=dropout) self.offset = offset self.balanced_classes = balanced_classes self.infonce = infonce def sample_negatives(self, y): bsz, fsz, tsz = y.shape y = y.transpose(0, 1) # BCT -> CBT y = y.contiguous().view(fsz, -1) # CBT => C(BxT) cross_high = tsz * bsz high = tsz if self.sample_distance is None else min(tsz, self.sample_distance) assert high > 1 neg_idxs = torch.randint(low=0, high=high, size=(bsz, self.n_negatives * tsz)) with torch.no_grad(): if self.n_negatives > 0: tszs = ( buffered_arange(tsz) .unsqueeze(-1) .expand(-1, self.n_negatives) .flatten() ) neg_idxs = torch.randint( low=0, high=high - 1, size=(bsz, self.n_negatives * tsz) ) neg_idxs[neg_idxs >= tszs] += 1 if self.cross_sample_negatives > 0: tszs = ( buffered_arange(tsz) .unsqueeze(-1) .expand(-1, self.cross_sample_negatives) .flatten() ) cross_neg_idxs = torch.randint( low=0, high=cross_high - 1, size=(bsz, self.cross_sample_negatives * tsz), ) cross_neg_idxs[cross_neg_idxs >= tszs] += 1 if self.n_negatives > 0: for i in range(1, bsz): neg_idxs[i] += i * high else: neg_idxs = cross_neg_idxs if self.cross_sample_negatives > 0 and self.n_negatives > 0: neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1) negs = y[..., neg_idxs.view(-1)] negs = negs.view( fsz, bsz, self.n_negatives + self.cross_sample_negatives, tsz ).permute( 2, 1, 0, 3 ) # to NxBxCxT return negs def forward(self, x, y): x = x.unsqueeze(-1) x = 
self.project_to_steps(x) # BxCxTxS x = self.dropout(x) negatives = self.sample_negatives(y) y = y.unsqueeze(0) targets = torch.cat([y, negatives], dim=0) # Copies x B x C x T copies = targets.size(0) bsz, dim, tsz, steps = x.shape steps = min(steps, tsz - self.offset) predictions = x.new( bsz * copies * (tsz - self.offset + 1) * steps - ((steps + 1) * steps // 2) * copies * bsz ) if self.infonce: labels = predictions.new_full( (predictions.shape[0] // copies,), 0, dtype=torch.long ) else: labels = torch.zeros_like(predictions) weights = ( torch.full_like(labels, 1 / self.n_negatives) if self.balanced_classes and not self.infonce else None ) start = end = 0 for i in range(steps): offset = i + self.offset end = start + (tsz - offset) * bsz * copies if self.infonce: predictions[start:end] = torch.einsum( "bct,nbct->tbn", x[..., :-offset, i], targets[..., offset:] ).flatten() else: pos_num = (end - start) // copies predictions[start:end] = torch.einsum( "bct,nbct->nbt", x[..., :-offset, i], targets[..., offset:] ).flatten() labels[start : start + pos_num] = 1.0 if weights is not None: weights[start : start + pos_num] = 1.0 start = end assert end == predictions.numel(), "{} != {}".format(end, predictions.numel()) if self.infonce: predictions = predictions.view(-1, copies) else: if weights is not None: labels = (labels, weights) return predictions, labels @register_model_architecture("wav2vec", "wav2vec") def base_wav2vec_architecture(args): conv_feature_layers = "[(512, 10, 5)]" conv_feature_layers += " + [(512, 8, 4)]" conv_feature_layers += " + [(512, 4, 2)] * 3" args.conv_feature_layers = getattr(args, "conv_feature_layers", conv_feature_layers) args.conv_aggregator_layers = getattr( args, "conv_aggregator_layers", "[(512, 3, 1)] * 9" ) args.prediction_steps = getattr(args, "prediction_steps", 12) args.num_negatives = getattr(args, "num_negatives", 1) args.sample_distance = getattr(args, "sample_distance", None) args.cross_sample_negatives = getattr(args, "cross_sample_negatives", 0) args.dropout = getattr(args, "dropout", 0.0) args.dropout_features = getattr(args, "dropout_features", 0.0) args.dropout_agg = getattr(args, "dropout_agg", 0.0) args.encoder = getattr(args, "encoder", "cnn") args.aggregator = getattr(args, "aggregator", "cnn") args.skip_connections_feat = getattr(args, "skip_connections_feat", False) args.skip_connections_agg = getattr(args, "skip_connections_agg", False) args.residual_scale = getattr(args, "residual_scale", 0.5) args.gru_dim = getattr(args, "gru_dim", 512) args.no_conv_bias = getattr(args, "no_conv_bias", False) args.agg_zero_pad = getattr(args, "agg_zero_pad", False) args.log_compression = getattr(args, "log_compression", False) args.balanced_classes = getattr(args, "balanced_classes", False) args.infonce = getattr(args, "infonce", False) args.project_features = getattr(args, "project_features", "none") args.non_affine_group_norm = getattr(args, "non_affine_group_norm", False) args.offset = getattr(args, "offset", "auto") args.activation = getattr(args, "activation", "relu") args.vq_type = getattr(args, "vq_type", "none") args.vq_vars = getattr(args, "vq_vars", 320) args.vq_groups = getattr(args, "vq_groups", 2) args.vq_dim = getattr(args, "vq_dim", 0) args.vq_depth = getattr(args, "vq_depth", 1) args.combine_groups = getattr(args, "combine_groups", False) args.vq_temp = getattr(args, "vq_temp", "(2.0, 0.5, 0.999995)") args.vq_gamma = getattr(args, "vq_gamma", 0.25)
24,032
31.653533
131
py
RegularizedBN
RegularizedBN-main/fairseq/models/wav2vec/wav2vec2_asr.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import copy import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from fairseq import checkpoint_utils, tasks, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, BaseFairseqModel, register_model, register_model_architecture, ) from fairseq.modules import LayerNorm, PositionalEmbedding, TransformerDecoderLayer def add_common_args(parser): parser.add_argument("--w2v-path", help="path to wav2vec 2.0 model") parser.add_argument( "--no-pretrained-weights", action="store_true", help="if true, does not load pretrained weights", ) parser.add_argument( "--dropout-input", type=float, metavar="D", help="dropout to apply to the input (after feat extr)", ) parser.add_argument( "--final-dropout", type=float, metavar="D", help="dropout after transformer and before final projection", ) parser.add_argument( "--apply-mask", action="store_true", help="apply masking during fine-tuning" ) parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability inside wav2vec 2.0 model", ) parser.add_argument( "--attention-dropout", type=float, metavar="D", help="dropout probability for attention weights inside wav2vec 2.0 model", ) parser.add_argument( "--activation-dropout", "--relu-dropout", type=float, metavar="D", help="dropout probability after activation in FFN inside wav2vec 2.0 model", ) parser.add_argument( "--mask-length", type=int, help="repeat the mask indices multiple times" ) parser.add_argument( "--mask-prob", type=float, help="probability of replacing a token with mask" ) parser.add_argument( "--mask-selection", type=str, choices=["static", "uniform", "normal", "poisson"], help="how to choose masks", ) parser.add_argument( "--mask-other", type=float, help="stdev of the mask length in case of 'normal' selection strategy", ) parser.add_argument( "--no-mask-overlap", action="store_true", help="whether to allow masks to overlap", ) parser.add_argument( "--mask-channel-length", type=int, help="repeat the mask indices multiple times" ) parser.add_argument( "--mask-channel-prob", type=float, help="probability of replacing a token with mask", ) parser.add_argument( "--mask-channel-selection", type=str, choices=["static", "uniform", "normal", "poisson"], help="how to choose masks", ) parser.add_argument( "--mask-channel-other", type=float, help="stdev of the mask length in case of 'normal' selection strategy", ) parser.add_argument( "--no-mask-channel-overlap", action="store_true", help="whether to allow masks to overlap", ) parser.add_argument( "--freeze-finetune-updates", default=0, type=int, help="dont finetune wav2vec for this many updates", ) parser.add_argument( "--feature-grad-mult", default=None, type=float, help="reset feature grad mult in wav2vec 2.0 to this", ) parser.add_argument( "--layerdrop", default=0.0, type=float, help="probability of dropping a layer in wav2vec 2.0", ) @register_model("wav2vec_ctc") class Wav2VecCtc(BaseFairseqModel): @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" add_common_args(parser) def __init__(self, w2v_encoder, args): super().__init__() self.w2v_encoder = w2v_encoder self.args = args def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) return state_dict @classmethod def 
build_model(cls, args, task): """Build a new model instance.""" base_architecture(args) w2v_encoder = Wav2VecEncoder(args, task.target_dictionary) return cls(w2v_encoder, args) def get_normalized_probs(self, net_output, log_probs): """Get normalized probabilities (or log probs) from a net's output.""" logits = net_output["encoder_out"] if log_probs: return utils.log_softmax(logits.float(), dim=-1) else: return utils.softmax(logits.float(), dim=-1) def forward(self, **kwargs): x = self.w2v_encoder(**kwargs) return x # def max_positions(self): # return None @register_model("wav2vec_seq2seq") class TransformerModel(FairseqEncoderDecoderModel): def __init__(self, args, encoder, decoder): super().__init__(encoder, decoder) @staticmethod def add_args(parser): add_common_args(parser) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-ffn-embed-dim", type=int, metavar="N", help="decoder embedding dimension for FFN", ) parser.add_argument( "--decoder-layers", type=int, metavar="N", help="num decoder layers" ) parser.add_argument( "--decoder-layerdrop", type=float, metavar="D", help="decoder layerdrop chance", ) parser.add_argument( "--decoder-attention-heads", type=int, metavar="N", help="num decoder attention heads", ) parser.add_argument( "--decoder-learned-pos", action="store_true", help="use learned positional embeddings in the decoder", ) parser.add_argument( "--decoder-normalize-before", action="store_true", help="apply layernorm before each decoder block", ) parser.add_argument( "--no-token-positional-embeddings", default=False, action="store_true", help="if set, disables positional embeddings (outside self attention)", ) parser.add_argument( "--decoder-dropout", type=float, metavar="D", help="dropout probability in the decoder", ) parser.add_argument( "--decoder-attention-dropout", type=float, metavar="D", help="dropout probability for attention weights inside the decoder", ) parser.add_argument( "--decoder-activation-dropout", type=float, metavar="D", help="dropout probability after activation in FFN inside the decoder", ) # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, "max_source_positions"): args.max_source_positions = 2048 if not hasattr(args, "max_target_positions"): args.max_target_positions = 2048 src_dict, tgt_dict = task.source_dictionary, task.target_dictionary def build_embedding(dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) return emb decoder_embed_tokens = build_embedding(tgt_dict, args.decoder_embed_dim) encoder = cls.build_encoder(args) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerModel(args, encoder, decoder) @classmethod def build_encoder(cls, args): return Wav2VecEncoder(args) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder(args, tgt_dict, embed_tokens) def forward(self, **kwargs): encoder_out = self.encoder(tbc=False, **kwargs) decoder_out = self.decoder(encoder_out=encoder_out, **kwargs) return decoder_out def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) return state_dict class Wav2VecEncoder(FairseqEncoder): def __init__(self, args, tgt_dict=None): self.apply_mask = args.apply_mask arg_overrides = { "dropout": 
args.dropout, "activation_dropout": args.activation_dropout, "dropout_input": args.dropout_input, "attention_dropout": args.attention_dropout, "mask_length": args.mask_length, "mask_prob": args.mask_prob, "mask_selection": args.mask_selection, "mask_other": args.mask_other, "no_mask_overlap": args.no_mask_overlap, "mask_channel_length": args.mask_channel_length, "mask_channel_prob": args.mask_channel_prob, "mask_channel_selection": args.mask_channel_selection, "mask_channel_other": args.mask_channel_other, "no_mask_channel_overlap": args.no_mask_channel_overlap, "encoder_layerdrop": args.layerdrop, "feature_grad_mult": args.feature_grad_mult, } if getattr(args, "w2v_args", None) is None: state = checkpoint_utils.load_checkpoint_to_cpu( args.w2v_path, arg_overrides ) w2v_args = state["args"] else: state = None w2v_args = args.w2v_args assert args.normalize == w2v_args.normalize, 'Fine-tuning works best when data normalization is the same' w2v_args.data = args.data task = tasks.setup_task(w2v_args) model = task.build_model(w2v_args) if state is not None and not args.no_pretrained_weights: model.load_state_dict(state["model"], strict=True) model.remove_pretraining_modules() super().__init__(task.source_dictionary) d = w2v_args.encoder_embed_dim self.w2v_model = model self.final_dropout = nn.Dropout(args.final_dropout) self.freeze_finetune_updates = args.freeze_finetune_updates self.num_updates = 0 if tgt_dict is not None: self.proj = Linear(d, len(tgt_dict)) elif getattr(args, 'decoder_embed_dim', d) != d: self.proj = Linear(d, args.decoder_embed_dim) else: self.proj = None def set_num_updates(self, num_updates): """Set the number of parameters updates.""" super().set_num_updates(num_updates) self.num_updates = num_updates def forward(self, source, padding_mask, tbc=True, **kwargs): w2v_args = { "source": source, "padding_mask": padding_mask, "mask": self.apply_mask and self.training, } ft = self.freeze_finetune_updates <= self.num_updates with torch.no_grad() if not ft else contextlib.ExitStack(): x, padding_mask = self.w2v_model.extract_features(**w2v_args) if tbc: # B x T x C -> T x B x C x = x.transpose(0, 1) x = self.final_dropout(x) if self.proj: x = self.proj(x) return { "encoder_out": x, # T x B x C "encoder_padding_mask": padding_mask, # B x T "padding_mask": padding_mask, } def reorder_encoder_out(self, encoder_out, new_order): if encoder_out["encoder_out"] is not None: encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select( 1, new_order ) if encoder_out["encoder_padding_mask"] is not None: encoder_out["encoder_padding_mask"] = encoder_out[ "encoder_padding_mask" ].index_select(0, new_order) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" return None def upgrade_state_dict_named(self, state_dict, name): return state_dict class TransformerDecoder(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). 
""" def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(dictionary) self.dropout = args.decoder_dropout self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim self.output_embed_dim = args.decoder_embed_dim args.encoder_embed_dim = embed_dim self.layerdrop = args.decoder_layerdrop padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = ( Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None ) self.embed_positions = ( PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None ) args = copy.deepcopy(args) args.dropout = args.decoder_dropout args.attention_dropout = args.decoder_attention_dropout args.activation_dropout = args.decoder_activation_dropout self.layers = nn.ModuleList([]) self.layers.extend( [ TransformerDecoderLayer(args, no_encoder_attn) for _ in range(args.decoder_layers) ] ) if not self.share_input_output_embed: self.embed_out = nn.Parameter( torch.Tensor(len(dictionary), self.output_embed_dim) ) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if args.decoder_normalize_before and not getattr( args, "no_decoder_final_norm", False ): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward( self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused ): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ prev_output_tokens = prev_output_tokens.long() x, extra = self.extract_features( prev_output_tokens, encoder_out, incremental_state ) x = self.output_layer(x) return x, extra def extract_features( self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused ): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = ( self.embed_positions( prev_output_tokens, incremental_state=incremental_state ) if self.embed_positions is not None else None ) if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: dropout_probability = np.random.random() if not self.training or (dropout_probability > self.layerdrop): x, attn, _ = layer( x, encoder_out["encoder_out"] if encoder_out is not None else None, encoder_out["encoder_padding_mask"] if encoder_out is not None else None, incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) return x, {"attn": attn, "inner_states": inner_states} def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions) def buffered_future_mask(self, tensor): dim = tensor.size(0) if ( not hasattr(self, "_future_mask") or self._future_mask is None or self._future_mask.device != tensor.device or self._future_mask.size(0) < dim ): self._future_mask = torch.triu( utils.fill_with_neg_inf(tensor.new(dim, dim)), 1 ) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): return state_dict def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.0) return m @register_model_architecture("wav2vec_ctc", "wav2vec_ctc") def base_architecture(args): args.no_pretrained_weights = getattr(args, "no_pretrained_weights", False) args.dropout_input = getattr(args, "dropout_input", 0) args.final_dropout = getattr(args, "final_dropout", 0) args.apply_mask = getattr(args, "apply_mask", False) args.dropout = getattr(args, "dropout", 0) args.attention_dropout = getattr(args, "attention_dropout", 0) args.activation_dropout = getattr(args, "activation_dropout", 0) args.mask_length = getattr(args, "mask_length", 10) args.mask_prob = getattr(args, "mask_prob", 0.5) args.mask_selection = getattr(args, "mask_selection", "static") args.mask_other = getattr(args, "mask_other", 0) args.no_mask_overlap = getattr(args, "no_mask_overlap", False) args.mask_channel_length = getattr(args, "mask_channel_length", 10) args.mask_channel_prob = getattr(args, "mask_channel_prob", 
0.5) args.mask_channel_selection = getattr(args, "mask_channel_selection", "static") args.mask_channel_other = getattr(args, "mask_channel_other", 0) args.no_mask_channel_overlap = getattr(args, "no_mask_channel_overlap", False) args.freeze_finetune_updates = getattr(args, "freeze_finetune_updates", 0) args.feature_grad_mult = getattr(args, "feature_grad_mult", 0) args.layerdrop = getattr(args, "layerdrop", 0.0) @register_model_architecture("wav2vec_seq2seq", "wav2vec_seq2seq") def seq2seq_architecture(args): args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_layers = getattr(args, "decoder_layers", 10) args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.decoder_dropout = getattr(args, "decoder_dropout", 0) args.decoder_attention_dropout = getattr(args, "decoder_attention_dropout", 0) args.decoder_activation_dropout = getattr(args, "decoder_activation_dropout", 0) args.share_decoder_input_output_embed = getattr(args, "share_decoder_input_output_embed", False) base_architecture(args)
22,473
32.344214
113
py
RegularizedBN
RegularizedBN-main/fairseq/models/wav2vec/wav2vec2.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from typing import List, Tuple from fairseq import utils from fairseq.data.data_utils import compute_mask_indices from fairseq.models import BaseFairseqModel, register_model, register_model_architecture from fairseq.modules import ( Fp32GroupNorm, Fp32LayerNorm, GradMultiply, GumbelVectorQuantizer, LayerNorm, MultiheadAttention, SamePad, TransposeLast, ) from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import buffered_arange @register_model("wav2vec2") class Wav2Vec2Model(BaseFairseqModel): @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument( "--extractor-mode", choices=["default", "layer_norm"], help="mode for feature extractor. default has a single group norm with d groups in the first conv block, whereas layer_norm has layer norms in every block (meant to use with --normalize)", ) parser.add_argument( "--encoder-layers", type=int, metavar="L", help="num encoder layers in the transformer", ) parser.add_argument( "--encoder-embed-dim", type=int, metavar="H", help="encoder embedding dimension", ) parser.add_argument( "--encoder-ffn-embed-dim", type=int, metavar="F", help="encoder embedding dimension for FFN", ) parser.add_argument( "--encoder-attention-heads", type=int, metavar="A", help="num encoder attention heads", ) parser.add_argument( "--activation-fn", choices=utils.get_available_activation_fns(), help="activation function to use", ) parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability for the transformer", ) parser.add_argument( "--attention-dropout", type=float, metavar="D", help="dropout probability for attention weights", ) parser.add_argument( "--activation-dropout", type=float, metavar="D", help="dropout probability after activation in FFN", ) parser.add_argument( "--final-dim", type=int, metavar="D", help="project final representations and targets to this many dimensions", ) parser.add_argument( "--layer-norm-first", action="store_true", help="apply layernorm first in the transformer", ) parser.add_argument( "--encoder-layerdrop", type=float, help="probability of dropping a tarnsformer layer", ) parser.add_argument( "--conv-feature-layers", type=str, metavar="EXPR", help="convolutional feature extraction layers [(dim, kernel_size, stride), ...]", ) parser.add_argument( "--logit-temp", type=float, help="temperature to divide logits by" ) parser.add_argument( "--quantize-targets", action="store_true", help="use quantized targets" ) parser.add_argument( "--quantize-input", action="store_true", help="use quantized inputs" ) parser.add_argument( "--feature-grad-mult", type=float, help="multiply feature extractor var grads by this", ) parser.add_argument( "--latent-vars", type=int, metavar="N", help="number of latent variables V in each group of the codebook", ) parser.add_argument( "--latent-groups", type=int, metavar="N", help="number of groups G of latent variables in the codebook", ) parser.add_argument( "--latent-dim", type=int, metavar="N", help="if set, uses this dimensionality for latent variables. 
otherwise uses final_dim / latent_groups", ) parser.add_argument("--mask-length", type=int, help="mask length") parser.add_argument( "--mask-prob", type=float, help="probability of replacing a token with mask" ) parser.add_argument( "--mask-selection", type=str, choices=["static", "uniform", "normal", "poisson"], help="how to choose masks", ) parser.add_argument( "--mask-other", type=float, help="secondary mask argument (used for more complex distributions), see help in compute_mask_indices", ) parser.add_argument( "--no-mask-overlap", action="store_true", help="whether to allow masks to overlap", ) parser.add_argument( "--mask-min-space", type=int, help="min space between spans (if no overlap is enabled)", ) parser.add_argument( "--mask-channel-length", type=int, help="repeat the mask indices multiple times", ) parser.add_argument( "--mask-channel-prob", type=float, help="probability of replacing a token with mask", ) parser.add_argument( "--mask-channel-selection", type=str, choices=["static", "uniform", "normal", "poisson"], help="how to choose masks", ) parser.add_argument( "--mask-channel-other", type=float, help="secondary mask argument (used for more complex distributions), see help in compute_mask_indices", ) parser.add_argument( "--no-mask-channel-overlap", action="store_true", help="whether to allow masks to overlap", ) parser.add_argument( "--mask-channel-min-space", type=int, help="min space between spans (if no overlap is enabled)", ) parser.add_argument( "--dropout-input", type=float, metavar="D", help="dropout to apply to the input (after feat extr)", ) parser.add_argument( "--dropout-features", type=float, metavar="D", help="dropout to apply to the features (after feat extr)", ) parser.add_argument( "--num-negatives", type=int, metavar="N", help="number of negative examples" ) parser.add_argument( "--negatives-from-everywhere", action="store_true", help="sample negatives from everywhere, not just masked states", ) parser.add_argument( "--cross-sample-negatives", type=int, metavar="N", help="num of cross sampled negatives", ) parser.add_argument( "--codebook-negatives", type=int, metavar="N", help="num of codebook sampled negatives", ) parser.add_argument( "--conv-pos", type=int, metavar="N", help="number of filters for convolutional positional embeddings", ) parser.add_argument( "--conv-pos-groups", type=int, metavar="N", help="number of groups for convolutional positional embedding", ) parser.add_argument( "--latent-temp", type=str, metavar="D", help="temperature for latent variable sampling. 
can be tuple of 3 values (start, end, decay)", ) parser.add_argument( "--target-glu", action="store_true", help="adds projection + glu to targets" ) parser.add_argument( "--conv-bias", action="store_true", help="include bias in conv encoder" ) def __init__(self, args): super().__init__() self.args = args feature_enc_layers = eval(args.conv_feature_layers) self.embed = feature_enc_layers[-1][0] self.feature_extractor = ConvFeatureExtractionModel( conv_layers=feature_enc_layers, dropout=0.0, mode=args.extractor_mode, conv_bias=args.conv_bias, ) self.post_extract_proj = ( nn.Linear(self.embed, args.encoder_embed_dim) if self.embed != args.encoder_embed_dim and not args.quantize_input else None ) self.mask_prob = args.mask_prob self.mask_selection = args.mask_selection self.mask_other = args.mask_other self.mask_length = args.mask_length self.no_mask_overlap = args.no_mask_overlap self.mask_min_space = args.mask_min_space self.mask_channel_prob = args.mask_channel_prob self.mask_channel_selection = args.mask_channel_selection self.mask_channel_other = args.mask_channel_other self.mask_channel_length = args.mask_channel_length self.no_mask_channel_overlap = args.no_mask_channel_overlap self.mask_channel_min_space = args.mask_channel_min_space self.dropout_input = nn.Dropout(args.dropout_input) self.dropout_features = nn.Dropout(args.dropout_features) self.feature_grad_mult = args.feature_grad_mult self.quantizer = None self.input_quantizer = None self.n_negatives = args.num_negatives self.cross_sample_negatives = args.cross_sample_negatives self.codebook_negatives = args.codebook_negatives self.negatives_from_everywhere = args.negatives_from_everywhere self.logit_temp = args.logit_temp if args.quantize_input: vq_dim = args.latent_dim if args.latent_dim > 0 else args.encoder_embed_dim self.input_quantizer = ( GumbelVectorQuantizer( dim=args.encoder_embed_dim, num_vars=args.latent_vars, temp=eval(args.latent_temp), groups=args.latent_groups, combine_groups=False, vq_dim=vq_dim, time_first=True, ) if not args.same_quantizer else self.quantizer ) self.project_inp = nn.Linear(vq_dim, args.encoder_embed_dim) final_dim = args.final_dim if args.final_dim > 0 else args.encoder_embed_dim if args.quantize_targets: vq_dim = args.latent_dim if args.latent_dim > 0 else final_dim self.quantizer = GumbelVectorQuantizer( dim=self.embed, num_vars=args.latent_vars, temp=eval(args.latent_temp), groups=args.latent_groups, combine_groups=False, vq_dim=vq_dim, time_first=True, ) self.project_q = nn.Linear(vq_dim, final_dim) else: self.project_q = nn.Linear(self.embed, final_dim) self.mask_emb = nn.Parameter( torch.FloatTensor(args.encoder_embed_dim).uniform_() ) self.encoder = TransformerEncoder(args) self.layer_norm = LayerNorm(self.embed) self.target_glu = None if args.target_glu: self.target_glu = nn.Sequential( nn.Linear(final_dim, final_dim * 2), nn.GLU() ) self.final_proj = nn.Linear(args.encoder_embed_dim, final_dim) def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) """Upgrade a (possibly old) state dict for new versions of fairseq.""" return state_dict @classmethod def build_model(cls, args, task=None): """Build a new model instance.""" # make sure all arguments are present base_architecture(args) return cls(args) def apply_mask(self, x, padding_mask): B, T, C = x.shape if self.mask_prob > 0: mask_indices = compute_mask_indices( (B, T), padding_mask, self.mask_prob, self.mask_length, self.mask_selection, self.mask_other, min_masks=2, 
no_overlap=self.no_mask_overlap, min_space=self.mask_min_space, ) mask_indices = torch.from_numpy(mask_indices).to(x.device) x[mask_indices] = self.mask_emb else: mask_indices = None if self.mask_channel_prob > 0: mask_channel_indices = compute_mask_indices( (B, C), None, self.mask_channel_prob, self.mask_channel_length, self.mask_channel_selection, self.mask_channel_other, no_overlap=self.no_mask_channel_overlap, min_space=self.mask_channel_min_space, ) mask_channel_indices = ( torch.from_numpy(mask_channel_indices) .to(x.device) .unsqueeze(1) .expand(-1, T, -1) ) x[mask_channel_indices] = 0 return x, mask_indices def sample_negatives(self, y, num): if self.n_negatives == 0 and self.cross_sample_negatives == 0: return y.new(0) bsz, tsz, fsz = y.shape y = y.view(-1, fsz) # BTC => (BxT)C cross_high = tsz * bsz high = tsz with torch.no_grad(): assert high > 1, f"{bsz,tsz,fsz}" if self.n_negatives > 0: tszs = ( buffered_arange(num) .unsqueeze(-1) .expand(-1, self.n_negatives) .flatten() ) neg_idxs = torch.randint( low=0, high=high - 1, size=(bsz, self.n_negatives * num) ) neg_idxs[neg_idxs >= tszs] += 1 if self.cross_sample_negatives > 0: tszs = ( buffered_arange(num) .unsqueeze(-1) .expand(-1, self.cross_sample_negatives) .flatten() ) cross_neg_idxs = torch.randint( low=0, high=cross_high - 1, size=(bsz, self.cross_sample_negatives * num), ) cross_neg_idxs[cross_neg_idxs >= tszs] += 1 if self.n_negatives > 0: for i in range(1, bsz): neg_idxs[i] += i * high else: neg_idxs = cross_neg_idxs if self.cross_sample_negatives > 0 and self.n_negatives > 0: neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1) negs = y[neg_idxs.view(-1)] negs = negs.view( bsz, num, self.n_negatives + self.cross_sample_negatives, fsz ).permute( 2, 0, 1, 3 ) # to NxBxTxC return negs, neg_idxs def compute_preds(self, x, y, negatives): neg_is_pos = (y == negatives).all(-1) y = y.unsqueeze(0) targets = torch.cat([y, negatives], dim=0) logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1).type_as(x) logits /= self.logit_temp if neg_is_pos.any(): logits[1:][neg_is_pos] = float("-inf") return logits def forward(self, source, padding_mask=None, mask=True, features_only=False): if self.feature_grad_mult > 0: features = self.feature_extractor(source) if self.feature_grad_mult != 1.0: features = GradMultiply.apply(features, self.feature_grad_mult) else: with torch.no_grad(): features = self.feature_extractor(source) features_pen = features.float().pow(2).mean() features = features.transpose(1, 2) features = self.layer_norm(features) unmasked_features = features.clone() if padding_mask is not None: extra = padding_mask.size(1) % features.size(1) if extra > 0: padding_mask = padding_mask[:, :-extra] padding_mask = padding_mask.view(padding_mask.size(0), features.size(1), -1) padding_mask = padding_mask.all(-1) if self.post_extract_proj is not None: features = self.post_extract_proj(features) features = self.dropout_input(features) unmasked_features = self.dropout_features(unmasked_features) num_vars = None code_ppl = None prob_ppl = None curr_temp = None if self.input_quantizer: q = self.input_quantizer(features, produce_targets=False) features = q["x"] num_vars = q["num_vars"] code_ppl = q["code_perplexity"] prob_ppl = q["prob_perplexity"] curr_temp = q["temp"] features = self.project_inp(features) if mask: x, mask_indices = self.apply_mask(features, padding_mask) if mask_indices is not None: y = unmasked_features[mask_indices].view(unmasked_features.size(0), -1, unmasked_features.size(-1)) else: y = 
unmasked_features else: x = features y = unmasked_features mask_indices = None x = self.encoder(x, padding_mask=padding_mask) if features_only: return {"x": x, "padding_mask": padding_mask} if self.quantizer: q = self.quantizer(y, produce_targets=False) y = q["x"] num_vars = q["num_vars"] code_ppl = q["code_perplexity"] prob_ppl = q["prob_perplexity"] curr_temp = q["temp"] y = self.project_q(y) if self.negatives_from_everywhere: neg_cands, *_ = self.quantizer(unmasked_features, produce_targets=False) negs, _ = self.sample_negatives(neg_cands, y.size(1)) negs = self.project_q(negs) else: negs, _ = self.sample_negatives(y, y.size(1)) if self.codebook_negatives > 0: cb_negs = self.quantizer.sample_from_codebook( y.size(0) * y.size(1), self.codebook_negatives ) cb_negs = cb_negs.view( self.codebook_negatives, y.size(0), y.size(1), -1 ) # order doesnt matter cb_negs = self.project_q(cb_negs) negs = torch.cat([negs, cb_negs], dim=0) else: y = self.project_q(y) if self.negatives_from_everywhere: negs, _ = self.sample_negatives(unmasked_features, y.size(1)) negs = self.project_q(negs) else: negs, _ = self.sample_negatives(y, y.size(1)) x = x[mask_indices].view(x.size(0), -1, x.size(-1)) if self.target_glu: y = self.target_glu(y) negs = self.target_glu(negs) x = self.final_proj(x) x = self.compute_preds(x, y, negs) result = {"x": x, "padding_mask": padding_mask, "features_pen": features_pen} if prob_ppl is not None: result["prob_perplexity"] = prob_ppl result["code_perplexity"] = code_ppl result["num_vars"] = num_vars result["temp"] = curr_temp return result def quantize(self, x): assert self.quantizer is not None x = self.feature_extractor(x) x = x.transpose(1, 2) x = self.layer_norm(x) return self.quantizer.forward_idx(x) def extract_features(self, source, padding_mask, mask=False): res = self.forward(source, padding_mask, mask=mask, features_only=True) return res["x"], res["padding_mask"] def get_logits(self, net_output): logits = net_output["x"] logits = logits.transpose(0, 2) logits = logits.reshape(-1, logits.size(-1)) return logits def get_targets(self, sample, net_output, expand_steps=True): x = net_output["x"] return x.new_zeros(x.size(1) * x.size(2), dtype=torch.long) def get_extra_losses(self, net_output): pen = [] if "prob_perplexity" in net_output: pen.append( (net_output["num_vars"] - net_output["prob_perplexity"]) / net_output["num_vars"] ) if "features_pen" in net_output: pen.append(net_output["features_pen"]) return pen def remove_pretraining_modules(self): self.quantizer = None self.project_q = None self.target_glu = None self.final_proj = None class ConvFeatureExtractionModel(nn.Module): def __init__( self, conv_layers: List[Tuple[int, int, int]], dropout: float = 0.0, mode: str = "default", conv_bias: bool = False, ): super().__init__() assert mode in {"default", "layer_norm"} def block( n_in, n_out, k, stride, is_layer_norm=False, is_group_norm=False, conv_bias=False, ): def make_conv(): conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias) nn.init.kaiming_normal_(conv.weight) return conv assert ( is_layer_norm and is_group_norm ) == False, "layer norm and group norm are exclusive" if is_layer_norm: return nn.Sequential( make_conv(), nn.Dropout(p=dropout), nn.Sequential( TransposeLast(), Fp32LayerNorm(dim, elementwise_affine=True), TransposeLast(), ), nn.GELU(), ) elif is_group_norm: return nn.Sequential( make_conv(), nn.Dropout(p=dropout), Fp32GroupNorm(dim, dim, affine=True), nn.GELU(), ) else: return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU()) in_d 
= 1 self.conv_layers = nn.ModuleList() for i, cl in enumerate(conv_layers): assert len(cl) == 3, "invalid conv definition: " + str(cl) (dim, k, stride) = cl self.conv_layers.append( block( in_d, dim, k, stride, is_layer_norm=mode == "layer_norm", is_group_norm=mode == "default" and i == 0, conv_bias=conv_bias, ) ) in_d = dim def forward(self, x): # BxT -> BxCxT x = x.unsqueeze(1) for conv in self.conv_layers: x = conv(x) return x class TransformerEncoder(nn.Module): def __init__(self, args): super().__init__() self.dropout = args.dropout self.embedding_dim = args.encoder_embed_dim self.pos_conv = nn.Conv1d( self.embedding_dim, self.embedding_dim, kernel_size=args.conv_pos, padding=args.conv_pos // 2, groups=args.conv_pos_groups, ) dropout = 0 std = math.sqrt((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim)) nn.init.normal_(self.pos_conv.weight, mean=0, std=std) nn.init.constant_(self.pos_conv.bias, 0) self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2) self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU()) self.layers = nn.ModuleList( [ TransformerSentenceEncoderLayer( embedding_dim=self.embedding_dim, ffn_embedding_dim=args.encoder_ffn_embed_dim, num_attention_heads=args.encoder_attention_heads, dropout=self.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_fn=args.activation_fn, layer_norm_first=args.layer_norm_first, ) for _ in range(args.encoder_layers) ] ) self.layer_norm_first = args.layer_norm_first self.layer_norm = LayerNorm(self.embedding_dim) self.layerdrop = args.encoder_layerdrop self.apply(init_bert_params) def forward(self, x, padding_mask=None): x = self.extract_features(x, padding_mask) if self.layer_norm_first: x = self.layer_norm(x) return x def extract_features(self, x, padding_mask=None): if padding_mask is not None: x[padding_mask] = 0 x_conv = self.pos_conv(x.transpose(1, 2)) x_conv = x_conv.transpose(1, 2) x += x_conv if not self.layer_norm_first: x = self.layer_norm(x) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) layer_results = [] for i, layer in enumerate(self.layers): dropout_probability = np.random.random() if not self.training or (dropout_probability > self.layerdrop): x, z = layer(x, self_attn_padding_mask=padding_mask, need_weights=False) layer_results.append(x) # T x B x C -> B x T x C x = x.transpose(0, 1) return x def max_positions(self): """Maximum output length supported by the encoder.""" return self.args.max_positions def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" return state_dict class TransformerSentenceEncoderLayer(nn.Module): """ Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained models. 
""" def __init__( self, embedding_dim: float = 768, ffn_embedding_dim: float = 3072, num_attention_heads: float = 8, dropout: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, activation_fn: str = "relu", layer_norm_first: bool = False, ) -> None: super().__init__() # Initialize parameters self.embedding_dim = embedding_dim self.dropout = dropout self.activation_dropout = activation_dropout # Initialize blocks self.activation_fn = utils.get_activation_fn(activation_fn) self.self_attn = MultiheadAttention( self.embedding_dim, num_attention_heads, dropout=attention_dropout, self_attention=True, ) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(self.activation_dropout) self.dropout3 = nn.Dropout(dropout) self.layer_norm_first = layer_norm_first # layer norm associated with the self attention layer self.self_attn_layer_norm = LayerNorm(self.embedding_dim) self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim) self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim) # layer norm associated with the position wise feed-forward NN self.final_layer_norm = LayerNorm(self.embedding_dim) def forward( self, x: torch.Tensor, self_attn_mask: torch.Tensor = None, self_attn_padding_mask: torch.Tensor = None, need_weights: bool = False, att_args=None, ): """ LayerNorm is applied either before or after the self-attention/ffn modules similar to the original Transformer imlementation. """ residual = x if self.layer_norm_first: x = self.self_attn_layer_norm(x) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, need_weights=False, attn_mask=self_attn_mask, ) x = self.dropout1(x) x = residual + x residual = x x = self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = self.dropout2(x) x = self.fc2(x) x = self.dropout3(x) x = residual + x else: x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, need_weights=need_weights, ) x = self.dropout1(x) x = residual + x x = self.self_attn_layer_norm(x) residual = x x = self.activation_fn(self.fc1(x)) x = self.dropout2(x) x = self.fc2(x) x = self.dropout3(x) x = residual + x x = self.final_layer_norm(x) return x, attn @register_model_architecture("wav2vec2", "wav2vec2") def base_architecture(args): args.extractor_mode = getattr(args, "extractor_mode", "default") args.encoder_layers = getattr(args, "encoder_layers", 12) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 3072) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12) args.activation_fn = getattr(args, "activation_fn", "gelu") args.dropout = getattr(args, "dropout", 0.1) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.final_dim = getattr(args, "final_dim", 0) args.layer_norm_first = getattr(args, "layer_norm_first", False) args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0.0) conv_feature_layers = "[(512, 10, 5)]" conv_feature_layers += " + [(512, 8, 4)]" conv_feature_layers += " + [(512, 4, 2)] * 3" conv_feature_layers += " + [(512, 1, 1)]" args.conv_feature_layers = getattr(args, "conv_feature_layers", conv_feature_layers) args.logit_temp = getattr(args, "logit_temp", 0.1) args.quantize_targets = getattr(args, "quantize_targets", False) args.quantize_input = getattr(args, "quantize_input", False) args.feature_grad_mult = getattr(args, "feature_grad_mult", 1.0) 
args.latent_vars = getattr(args, "latent_vars", 320) args.latent_groups = getattr(args, "latent_groups", 2) args.latent_dim = getattr(args, "latent_dim", 0) args.mask_length = getattr(args, "mask_length", 10) args.mask_prob = getattr(args, "mask_prob", 0.65) args.mask_selection = getattr(args, "mask_selection", "static") args.mask_other = getattr(args, "mask_other", 0) args.no_mask_overlap = getattr(args, "no_mask_overlap", False) args.mask_min_space = getattr(args, "mask_min_space", 1) args.mask_channel_length = getattr(args, "mask_channel_length", 10) args.mask_channel_prob = getattr(args, "mask_channel_prob", 0) args.mask_channel_selection = getattr(args, "mask_channel_selection", "static") args.mask_channel_other = getattr(args, "mask_channel_other", 0) args.no_mask_channel_overlap = getattr(args, "no_mask_channel_overlap", False) args.mask_channel_min_space = getattr(args, "mask_channel_min_space", 1) args.dropout_input = getattr(args, "dropout_input", 0) args.dropout_features = getattr(args, "dropout_features", 0) args.num_negatives = getattr(args, "num_negatives", 100) args.negatives_from_everywhere = getattr(args, "negatives_from_everywhere", False) args.cross_sample_negatives = getattr(args, "cross_sample_negatives", 0) args.codebook_negatives = getattr(args, "codebook_negatives", 0) args.conv_pos = getattr(args, "conv_pos", 128) args.conv_pos_groups = getattr(args, "conv_pos_groups", 16) args.latent_temp = getattr(args, "latent_temp", "(2,0.5,0.999995)") args.target_glu = getattr(args, "target_glu", False) args.conv_bias = getattr(args, "conv_bias", False)
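if __name__ == "__main__":
    # Illustrative check (not part of the original module): the overall
    # downsampling factor of the default feature extractor equals the product
    # of the per-layer strides in conv_feature_layers, i.e. 5*4*2*2*2*1 = 160,
    # so roughly one latent frame per 160 input samples. The explicit list
    # below mirrors the default string built in base_architecture; how that
    # string is parsed into (dim, kernel, stride) tuples is assumed to happen
    # elsewhere in the model construction code.
    conv_layers = [(512, 10, 5), (512, 8, 4)] + [(512, 4, 2)] * 3 + [(512, 1, 1)]
    total_stride = 1
    for _dim, _kernel, stride in conv_layers:
        total_stride *= stride
    print(total_stride)  # 160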
33,259
31.575906
200
py
RegularizedBN
RegularizedBN-main/fairseq/models/bart/hub_interface.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import copy import logging import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from typing import List from fairseq import utils from fairseq.data import encoders logger = logging.getLogger(__name__) class BARTHubInterface(nn.Module): """A simple PyTorch Hub interface to BART. Usage: https://github.com/pytorch/fairseq/tree/master/examples/bart """ def __init__(self, args, task, model): super().__init__() self.args = args self.task = task self.model = model self.bpe = encoders.build_bpe(args) self.max_positions = min(utils.resolve_max_positions( self.task.max_positions(), self.model.max_positions(), )) # this is useful for determining the device self.register_buffer('_float_tensor', torch.tensor([0], dtype=torch.float)) @property def device(self): return self._float_tensor.device def encode(self, sentence: str, *addl_sentences, no_separator=True) -> torch.LongTensor: """ BPE-encode a sentence (or multiple sentences). Every sequence begins with a beginning-of-sentence (`<s>`) symbol. Every sentence ends with an end-of-sentence (`</s>`). Example (single sentence): `<s> a b c </s>` Example (sentence pair): `<s> d e f </s> 1 2 3 </s>` The BPE encoding follows GPT-2. One subtle detail is that the GPT-2 BPE requires leading spaces. For example:: >>> bart.encode('Hello world').tolist() [0, 31414, 232, 2] >>> bart.encode(' world').tolist() [0, 232, 2] >>> bart.encode('world').tolist() [0, 8331, 2] """ tokens = self.bpe.encode(sentence) if len(tokens.split(' ')) > self.max_positions - 2: tokens = ' '.join(tokens.split(' ')[:self.max_positions - 2]) bpe_sentence = '<s> ' + tokens + ' </s>' for s in addl_sentences: bpe_sentence += (' </s>' if not no_separator else '') bpe_sentence += ' ' + self.bpe.encode(s) + ' </s>' tokens = self.task.source_dictionary.encode_line(bpe_sentence, append_eos=False) return tokens.long() def decode(self, tokens: torch.LongTensor): assert tokens.dim() == 1 tokens = tokens.cpu().numpy() if tokens[0] == self.task.source_dictionary.bos(): tokens = tokens[1:] # remove <s> eos_mask = (tokens == self.task.source_dictionary.eos()) doc_mask = eos_mask[1:] & eos_mask[:-1] sentences = np.split(tokens, doc_mask.nonzero()[0] + 1) sentences = [self.bpe.decode(self.task.source_dictionary.string(s)) for s in sentences] if len(sentences) == 1: return sentences[0] return sentences def _build_sample(self, src_tokens: List[torch.LongTensor]): # assert torch.is_tensor(src_tokens) dataset = self.task.build_dataset_for_inference( src_tokens, [x.numel() for x in src_tokens], ) sample = dataset.collater(dataset) sample = utils.apply_to_sample( lambda tensor: tensor.to(self.device), sample ) return sample def sample(self, sentences: List[str], beam: int = 1, verbose: bool = False, **kwargs) -> str: input = [self.encode(sentence) for sentence in sentences] hypos = self.generate(input, beam, verbose, **kwargs) return [self.decode(x['tokens']) for x in hypos] def generate(self, tokens: List[torch.LongTensor], beam: int = 5, verbose: bool = False, **kwargs) -> torch.LongTensor: sample = self._build_sample(tokens) # build generator using current args as well as any kwargs gen_args = copy.copy(self.args) gen_args.beam = beam for k, v in kwargs.items(): setattr(gen_args, k, v) generator = self.task.build_generator([self.model], gen_args) translations = self.task.inference_step( generator, 
[self.model],
            sample,
            prefix_tokens=sample['net_input']['src_tokens'].new_zeros((len(tokens), 1)).fill_(self.task.source_dictionary.bos()),
        )

        if verbose:
            src_str_with_unk = self.string(tokens)
            logger.info('S\t{}'.format(src_str_with_unk))

        def getarg(name, default):
            return getattr(gen_args, name, getattr(self.args, name, default))

        # Process top predictions
        hypos = [x[0] for x in translations]
        hypos = [v for _, v in sorted(zip(sample['id'].tolist(), hypos))]
        return hypos

    def extract_features(self, tokens: torch.LongTensor, return_all_hiddens: bool = False) -> torch.Tensor:
        if tokens.dim() == 1:
            tokens = tokens.unsqueeze(0)
        if tokens.size(-1) > min(self.model.max_positions()):
            raise ValueError('tokens exceeds maximum length: {} > {}'.format(
                tokens.size(-1), self.model.max_positions()
            ))
        tokens = tokens.to(device=self.device)
        prev_output_tokens = tokens.clone()

        prev_output_tokens[:, 0] = tokens.gather(
            1,
            (tokens.ne(self.task.source_dictionary.pad()).sum(dim=1) - 1).unsqueeze(-1),
        ).squeeze()

        prev_output_tokens[:, 1:] = tokens[:, :-1]
        features, extra = self.model(
            src_tokens=tokens,
            src_lengths=None,
            prev_output_tokens=prev_output_tokens,
            features_only=True,
            return_all_hiddens=return_all_hiddens,
        )
        if return_all_hiddens:
            # convert from T x B x C -> B x T x C
            inner_states = extra['inner_states']
            return [inner_state.transpose(0, 1) for inner_state in inner_states]
        else:
            return features  # just the last layer's features

    def register_classification_head(
        self, name: str, num_classes: int = None, embedding_size: int = None, **kwargs
    ):
        self.model.register_classification_head(
            name, num_classes=num_classes, embedding_size=embedding_size, **kwargs
        )

    def predict(self, head: str, tokens: torch.LongTensor, return_logits: bool = False):
        if tokens.dim() == 1:
            tokens = tokens.unsqueeze(0)
        features = self.extract_features(tokens.to(device=self.device))
        sentence_representation = features[
            tokens.eq(self.task.source_dictionary.eos()), :
        ].view(features.size(0), -1, features.size(-1))[:, -1, :]

        logits = self.model.classification_heads[head](sentence_representation)
        if return_logits:
            return logits
        return F.log_softmax(logits, dim=-1)
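if __name__ == "__main__":
    # Illustrative sketch of how decode() splits multi-sentence outputs: two
    # consecutive </s> tokens mark a sentence boundary, which is what the
    # eos_mask[1:] & eos_mask[:-1] trick above detects. Token ids are taken
    # from the encode() docstring (31414='Hello', 232=' world', 8331='world');
    # the leading <s> is assumed to have been stripped already.
    eos = 2
    toks = np.array([31414, 232, eos, eos, 8331, eos])
    eos_mask = toks == eos
    doc_mask = eos_mask[1:] & eos_mask[:-1]
    parts = np.split(toks, doc_mask.nonzero()[0] + 1)
    print(parts)  # [array([31414, 232, 2]), array([2, 8331, 2])]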
6,947
36.15508
129
py
RegularizedBN
RegularizedBN-main/fairseq/models/bart/model.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension """ import logging import torch import torch.nn as nn from fairseq import utils from fairseq.models import ( register_model, register_model_architecture, ) from fairseq.models.transformer import TransformerModel from fairseq.modules.transformer_sentence_encoder import init_bert_params from .hub_interface import BARTHubInterface logger = logging.getLogger(__name__) @register_model('bart') class BARTModel(TransformerModel): @classmethod def hub_models(cls): return { 'bart.base': 'http://dl.fbaipublicfiles.com/fairseq/models/bart.base.tar.gz', 'bart.large': 'http://dl.fbaipublicfiles.com/fairseq/models/bart.large.tar.gz', 'bart.large.mnli': 'http://dl.fbaipublicfiles.com/fairseq/models/bart.large.mnli.tar.gz', 'bart.large.cnn': 'http://dl.fbaipublicfiles.com/fairseq/models/bart.large.cnn.tar.gz', 'bart.large.xsum': 'http://dl.fbaipublicfiles.com/fairseq/models/bart.large.xsum.tar.gz', } def __init__(self, args, encoder, decoder): super().__init__(args, encoder, decoder) # We follow BERT's random weight initialization self.apply(init_bert_params) self.classification_heads = nn.ModuleDict() @staticmethod def add_args(parser): super(BARTModel, BARTModel).add_args(parser) parser.add_argument( '--pooler-dropout', type=float, metavar='D', help='dropout probability in the masked_lm pooler layers' ) parser.add_argument( '--pooler-activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use for pooler layer' ) @property def supported_targets(self): return {'self'} def forward( self, src_tokens, src_lengths, prev_output_tokens, features_only=False, classification_head_name=None, **kwargs ): if classification_head_name is not None: features_only = True encoder_out = self.encoder( src_tokens, src_lengths=src_lengths, **kwargs, ) x, extra = self.decoder( prev_output_tokens, encoder_out=encoder_out, features_only=features_only, **kwargs, ) if classification_head_name is not None: sentence_representation = x[ src_tokens.eq(self.encoder.dictionary.eos()), : ].view(x.size(0), -1, x.size(-1))[:, -1, :] x = self.classification_heads[classification_head_name]( sentence_representation ) return x, extra @classmethod def from_pretrained( cls, model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', bpe='gpt2', **kwargs, ): from fairseq import hub_utils x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), bpe=bpe, load_checkpoint_heads=True, **kwargs, ) return BARTHubInterface(x['args'], x['task'], x['models'][0]) def register_classification_head(self, name, num_classes=None, inner_dim=None, **kwargs): """Register a classification head.""" logger.info("Registering classification head: {0}".format(name)) if name in self.classification_heads: prev_num_classes = self.classification_heads[name].out_proj.out_features prev_inner_dim = self.classification_heads[name].dense.out_features if num_classes != prev_num_classes or inner_dim != prev_inner_dim: logger.warning( 're-registering head "{}" with num_classes {} (prev: {}) ' 'and inner_dim {} (prev: {})'.format( name, num_classes, prev_num_classes, inner_dim, prev_inner_dim ) ) self.classification_heads[name] = BARTClassificationHead( self.args.encoder_embed_dim, inner_dim 
or self.args.encoder_embed_dim, num_classes, self.args.pooler_activation_fn, self.args.pooler_dropout, ) def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) prefix = name + '.' if name != '' else '' current_head_names = [] if not hasattr(self, 'classification_heads') else \ self.classification_heads.keys() # Handle new classification heads present in the state dict. keys_to_delete = [] for k in state_dict.keys(): if not k.startswith(prefix + 'classification_heads.'): continue head_name = k[len(prefix + 'classification_heads.'):].split('.')[0] num_classes = state_dict[prefix + 'classification_heads.' + head_name + '.out_proj.weight'].size(0) inner_dim = state_dict[prefix + 'classification_heads.' + head_name + '.dense.weight'].size(0) if getattr(self.args, 'load_checkpoint_heads', False): if head_name not in current_head_names: self.register_classification_head(head_name, num_classes, inner_dim) else: if head_name not in current_head_names: logger.warning( 'deleting classification head ({}) from checkpoint ' 'not present in current model: {}'.format(head_name, k) ) keys_to_delete.append(k) elif ( num_classes != self.classification_heads[head_name].out_proj.out_features or inner_dim != self.classification_heads[head_name].dense.out_features ): logger.warning( 'deleting classification head ({}) from checkpoint ' 'with different dimensions than current model: {}'.format(head_name, k) ) keys_to_delete.append(k) for k in keys_to_delete: del state_dict[k] def truncate_emb(key): if key in state_dict: state_dict[key] = state_dict[key][:-1, :] # When finetuning on translation task, remove last row of # embedding matrix that corresponds to mask_idx token. loaded_dict_size = state_dict['encoder.embed_tokens.weight'].size(0) if loaded_dict_size == len(self.encoder.dictionary) + 1 and '<mask>' not in self.encoder.dictionary: truncate_emb('encoder.embed_tokens.weight') truncate_emb('decoder.embed_tokens.weight') truncate_emb('encoder.output_projection.weight') truncate_emb('decoder.output_projection.weight') # When continued pretraining on new set of languages for mbart, # add extra lang embeddings at the end of embed_tokens. # Note: newly added languages are assumed to have been added at the end. if self.args.task == 'multilingual_denoising' and loaded_dict_size < len(self.encoder.dictionary): logger.info( "Adding extra language embeddings not found in pretrained model for "\ "continued pretraining of MBART on new set of languages." ) loaded_mask_token_embedding = state_dict['encoder.embed_tokens.weight'][-1, :] num_langids_to_add = len(self.encoder.dictionary) - loaded_dict_size embed_dim = state_dict['encoder.embed_tokens.weight'].size(1) new_lang_embed_to_add = torch.zeros(num_langids_to_add, embed_dim) nn.init.normal_( new_lang_embed_to_add, mean=0, std=embed_dim ** -0.5 ) new_lang_embed_to_add = new_lang_embed_to_add.to( dtype=state_dict['encoder.embed_tokens.weight'].dtype, ) state_dict['encoder.embed_tokens.weight'] = torch.cat([ state_dict['encoder.embed_tokens.weight'][:loaded_dict_size-1, :], new_lang_embed_to_add, loaded_mask_token_embedding.unsqueeze(0)] ) state_dict['decoder.embed_tokens.weight'] = torch.cat([ state_dict['decoder.embed_tokens.weight'][:loaded_dict_size-1, :], new_lang_embed_to_add, loaded_mask_token_embedding.unsqueeze(0)] ) # Copy any newly-added classification heads into the state dict # with their current weights. 
        if hasattr(self, 'classification_heads'):
            cur_state = self.classification_heads.state_dict()
            for k, v in cur_state.items():
                if prefix + 'classification_heads.' + k not in state_dict:
                    logger.info('Overwriting ' + prefix + 'classification_heads.' + k)
                    state_dict[prefix + 'classification_heads.' + k] = v


class BARTClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(
        self,
        input_dim,
        inner_dim,
        num_classes,
        activation_fn,
        pooler_dropout,
    ):
        super().__init__()
        self.dense = nn.Linear(input_dim, inner_dim)
        self.activation_fn = utils.get_activation_fn(activation_fn)
        self.dropout = nn.Dropout(p=pooler_dropout)
        self.out_proj = nn.Linear(inner_dim, num_classes)

    def forward(self, features, **kwargs):
        x = features
        x = self.dropout(x)
        x = self.dense(x)
        x = self.activation_fn(x)
        x = self.dropout(x)
        x = self.out_proj(x)
        return x


@register_model_architecture('bart', 'bart_large')
def bart_large_architecture(args):
    args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
    args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4*1024)
    args.encoder_layers = getattr(args, 'encoder_layers', 12)
    args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)
    args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
    args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', True)
    args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)
    args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
    args.decoder_layers = getattr(args, 'decoder_layers', 12)
    args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
    args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False)
    args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', True)
    args.attention_dropout = getattr(args, 'attention_dropout', 0.)
    args.relu_dropout = getattr(args, 'relu_dropout', 0.)
args.dropout = getattr(args, 'dropout', 0.1) args.max_target_positions = getattr(args, 'max_target_positions', 1024) args.max_source_positions = getattr(args, 'max_source_positions', 1024) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', True) args.share_all_embeddings = getattr(args, 'share_all_embeddings', True) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) args.no_scale_embedding = getattr(args, 'no_scale_embedding', True) args.layernorm_embedding = getattr(args, 'layernorm_embedding', True) args.activation_fn = getattr(args, 'activation_fn', 'gelu') args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh') args.pooler_dropout = getattr(args, 'pooler_dropout', 0.0) @register_model_architecture('bart', 'bart_base') def bart_base_architecture(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4*768) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 12) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 12) bart_large_architecture(args) @register_model_architecture('bart', 'mbart_large') def mbart_large_architecture(args): args.no_scale_embedding = getattr(args, 'no_scale_embedding', False) bart_large_architecture(args) @register_model_architecture('bart', 'mbart_base') def mbart_base_architecture(args): args.no_scale_embedding = getattr(args, 'no_scale_embedding', False) bart_base_architecture(args) @register_model_architecture('bart', 'mbart_base_wmt20') def mbart_base_wmt20_architecture(args): args.layernorm_embedding = getattr(args, 'layernorm_embedding', False) mbart_base_architecture(args)
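if __name__ == "__main__":
    # Small smoke test (illustrative only) for the classification head defined
    # above: one pooled <eos> representation per sentence goes in, one logit
    # per class comes out. Dimensions and 'tanh' (the default
    # pooler_activation_fn) are just demo choices.
    head = BARTClassificationHead(
        input_dim=768, inner_dim=768, num_classes=3,
        activation_fn='tanh', pooler_dropout=0.0,
    )
    pooled = torch.randn(2, 768)
    print(head(pooled).shape)  # torch.Size([2, 3])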
13,648
41.126543
111
py
RegularizedBN
RegularizedBN-main/fairseq/models/nat/levenshtein_utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq.utils import new_arange # -------------- Helper Functions --------------------------------------------------- # def load_libnat(): try: from fairseq import libnat_cuda return libnat_cuda, True except ImportError as e: print(str(e) + '... fall back to CPU version') try: from fairseq import libnat return libnat, False except ImportError as e: import sys sys.stderr.write("ERROR: missing libnat_cuda. run `python setup.py build_ext --inplace`\n") raise e def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx): libnat, use_cuda = load_libnat() def _get_ins_targets_cuda(in_tokens, out_tokens, padding_idx, unk_idx): in_masks = in_tokens.ne(padding_idx) out_masks = out_tokens.ne(padding_idx) mask_ins_targets, masked_tgt_masks = libnat.generate_insertion_labels( out_tokens.int(), libnat.levenshtein_distance( in_tokens.int(), out_tokens.int(), in_masks.sum(1).int(), out_masks.sum(1).int() ) ) masked_tgt_masks = masked_tgt_masks.bool() & out_masks mask_ins_targets = mask_ins_targets.type_as( in_tokens)[:, 1:in_masks.size(1)].masked_fill_(~in_masks[:, 1:], 0) masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx) return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets def _get_ins_targets_cpu(in_tokens, out_tokens, padding_idx, unk_idx): in_seq_len, out_seq_len = in_tokens.size(1), out_tokens.size(1) in_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist()) ] out_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(out_tokens.tolist()) ] full_labels = libnat.suggested_ed2_path( in_tokens_list, out_tokens_list, padding_idx ) mask_inputs = [ [len(c) if c[0] != padding_idx else 0 for c in a[:-1]] for a in full_labels ] # generate labels masked_tgt_masks = [] for mask_input in mask_inputs: mask_label = [] for beam_size in mask_input[1:-1]: # HACK 1:-1 mask_label += [0] + [1 for _ in range(beam_size)] masked_tgt_masks.append( mask_label + [0 for _ in range(out_seq_len - len(mask_label))] ) mask_ins_targets = [ mask_input[1:-1] + [0 for _ in range(in_seq_len - 1 - len(mask_input[1:-1]))] for mask_input in mask_inputs ] # transform to tensor masked_tgt_masks = torch.tensor( masked_tgt_masks, device=out_tokens.device ).bool() mask_ins_targets = torch.tensor(mask_ins_targets, device=in_tokens.device) masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx) return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets if use_cuda: return _get_ins_targets_cuda(in_tokens, out_tokens, padding_idx, unk_idx) return _get_ins_targets_cpu(in_tokens, out_tokens, padding_idx, unk_idx) def _get_del_targets(in_tokens, out_tokens, padding_idx): libnat, use_cuda = load_libnat() def _get_del_targets_cuda(in_tokens, out_tokens, padding_idx): in_masks = in_tokens.ne(padding_idx) out_masks = out_tokens.ne(padding_idx) word_del_targets = libnat.generate_deletion_labels( in_tokens.int(), libnat.levenshtein_distance( in_tokens.int(), out_tokens.int(), in_masks.sum(1).int(), out_masks.sum(1).int() ) ) word_del_targets = word_del_targets.type_as(in_tokens).masked_fill_(~in_masks, 0) return word_del_targets def _get_del_targets_cpu(in_tokens, out_tokens, padding_idx): out_seq_len = out_tokens.size(1) with torch.cuda.device_of(in_tokens): in_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist()) ] 
out_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(out_tokens.tolist()) ] full_labels = libnat.suggested_ed2_path( in_tokens_list, out_tokens_list, padding_idx ) word_del_targets = [b[-1] for b in full_labels] word_del_targets = [ labels + [0 for _ in range(out_seq_len - len(labels))] for labels in word_del_targets ] # transform to tensor word_del_targets = torch.tensor(word_del_targets, device=out_tokens.device) return word_del_targets if use_cuda: return _get_del_targets_cuda(in_tokens, out_tokens, padding_idx) return _get_del_targets_cpu(in_tokens, out_tokens, padding_idx) def _apply_ins_masks( in_tokens, in_scores, mask_ins_pred, padding_idx, unk_idx, eos_idx ): in_masks = in_tokens.ne(padding_idx) in_lengths = in_masks.sum(1) # HACK: hacky way to shift all the paddings to eos first. in_tokens.masked_fill_(~in_masks, eos_idx) mask_ins_pred.masked_fill_(~in_masks[:, 1:], 0) out_lengths = in_lengths + mask_ins_pred.sum(1) out_max_len = out_lengths.max() out_masks = ( new_arange(out_lengths, out_max_len)[None, :] < out_lengths[:, None] ) reordering = (mask_ins_pred + in_masks[:, 1:].long()).cumsum(1) out_tokens = ( in_tokens.new_zeros(in_tokens.size(0), out_max_len) .fill_(padding_idx) .masked_fill_(out_masks, unk_idx) ) out_tokens[:, 0] = in_tokens[:, 0] out_tokens.scatter_(1, reordering, in_tokens[:, 1:]) out_scores = None if in_scores is not None: in_scores.masked_fill_(~in_masks, 0) out_scores = in_scores.new_zeros(*out_tokens.size()) out_scores[:, 0] = in_scores[:, 0] out_scores.scatter_(1, reordering, in_scores[:, 1:]) return out_tokens, out_scores def _apply_ins_words( in_tokens, in_scores, word_ins_pred, word_ins_scores, unk_idx ): word_ins_masks = in_tokens.eq(unk_idx) out_tokens = in_tokens.masked_scatter(word_ins_masks, word_ins_pred[word_ins_masks]) if in_scores is not None: out_scores = in_scores.masked_scatter( word_ins_masks, word_ins_scores[word_ins_masks] ) else: out_scores = None return out_tokens, out_scores def _apply_del_words( in_tokens, in_scores, in_attn, word_del_pred, padding_idx, bos_idx, eos_idx ): # apply deletion to a tensor in_masks = in_tokens.ne(padding_idx) bos_eos_masks = in_tokens.eq(bos_idx) | in_tokens.eq(eos_idx) max_len = in_tokens.size(1) word_del_pred.masked_fill_(~in_masks, 1) word_del_pred.masked_fill_(bos_eos_masks, 0) reordering = ( new_arange(in_tokens) .masked_fill_(word_del_pred, max_len) .sort(1)[1] ) out_tokens = in_tokens.masked_fill(word_del_pred, padding_idx).gather(1, reordering) out_scores = None if in_scores is not None: out_scores = in_scores.masked_fill(word_del_pred, 0).gather(1, reordering) out_attn = None if in_attn is not None: _mask = word_del_pred[:, :, None].expand_as(in_attn) _reordering = reordering[:, :, None].expand_as(in_attn) out_attn = in_attn.masked_fill(_mask, 0.).gather(1, _reordering) return out_tokens, out_scores, out_attn def _skip(x, mask): """ Getting sliced (dim=0) tensor by mask. Supporting tensor and list/dict of tensors. 
""" if isinstance(x, int): return x if x is None: return None if isinstance(x, torch.Tensor): if x.size(0) == mask.size(0): return x[mask] elif x.size(1) == mask.size(0): return x[:, mask] if isinstance(x, list): return [_skip(x_i, mask) for x_i in x] if isinstance(x, dict): return {k: _skip(v, mask) for k, v in x.items()} raise NotImplementedError def _skip_encoder_out(encoder, encoder_out, mask): if not mask.any(): return encoder_out else: return encoder.reorder_encoder_out(encoder_out, mask.nonzero(as_tuple=False).squeeze()) def _fill(x, mask, y, padding_idx): """ Filling tensor x with y at masked positions (dim=0). """ if x is None: return y assert x.dim() == y.dim() and mask.size(0) == x.size(0) assert x.dim() == 2 or (x.dim() == 3 and x.size(2) == y.size(2)) n_selected = mask.sum() assert n_selected == y.size(0) if n_selected == x.size(0): return y if x.size(1) < y.size(1): dims = [x.size(0), y.size(1) - x.size(1)] if x.dim() == 3: dims.append(x.size(2)) x = torch.cat([x, x.new_zeros(*dims).fill_(padding_idx)], 1) x[mask] = y elif x.size(1) > y.size(1): x[mask] = padding_idx if x.dim() == 2: x[mask, :y.size(1)] = y else: x[mask, :y.size(1), :] = y else: x[mask] = y return x
9,372
31.887719
103
py
RegularizedBN
RegularizedBN-main/fairseq/models/nat/levenshtein_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import ( Embedding, TransformerDecoderLayer ) from fairseq.models.nat import ( FairseqNATModel, FairseqNATDecoder, ensemble_decoder ) from fairseq.modules.transformer_sentence_encoder import init_bert_params from .levenshtein_utils import ( _skip, _skip_encoder_out, _fill, _get_ins_targets, _get_del_targets, _apply_ins_masks, _apply_ins_words, _apply_del_words ) @register_model("levenshtein_transformer") class LevenshteinTransformerModel(FairseqNATModel): @property def allow_length_beam(self): return False @staticmethod def add_args(parser): FairseqNATModel.add_args(parser) parser.add_argument( "--early-exit", default="6,6,6", type=str, help="number of decoder layers before word_del, mask_ins, word_ins", ) parser.add_argument( "--no-share-discriminator", action="store_true", help="separate parameters for discriminator", ) parser.add_argument( "--no-share-maskpredictor", action="store_true", help="separate parameters for mask-predictor", ) parser.add_argument( "--share-discriminator-maskpredictor", action="store_true", help="share the parameters for both mask-predictor and discriminator", ) parser.add_argument( "--sampling-for-deletion", action='store_true', help='instead of argmax, use sampling to predict the tokens' ) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): decoder = LevenshteinTransformerDecoder(args, tgt_dict, embed_tokens) if getattr(args, "apply_bert_init", False): decoder.apply(init_bert_params) return decoder def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): assert tgt_tokens is not None, "forward function only supports training." 
# encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) # generate training labels for insertion masked_tgt_masks, masked_tgt_tokens, mask_ins_targets = _get_ins_targets( prev_output_tokens, tgt_tokens, self.pad, self.unk ) mask_ins_targets = mask_ins_targets.clamp(min=0, max=255) # for safe prediction mask_ins_masks = prev_output_tokens[:, 1:].ne(self.pad) mask_ins_out, _ = self.decoder.forward_mask_ins( normalize=False, prev_output_tokens=prev_output_tokens, encoder_out=encoder_out ) word_ins_out, _ = self.decoder.forward_word_ins( normalize=False, prev_output_tokens=masked_tgt_tokens, encoder_out=encoder_out ) # make online prediction if self.decoder.sampling_for_deletion: word_predictions = torch.multinomial( F.softmax(word_ins_out, -1).view(-1, word_ins_out.size(-1)), 1).view( word_ins_out.size(0), -1) else: word_predictions = F.log_softmax(word_ins_out, dim=-1).max(2)[1] word_predictions.masked_scatter_( ~masked_tgt_masks, tgt_tokens[~masked_tgt_masks] ) # generate training labels for deletion word_del_targets = _get_del_targets(word_predictions, tgt_tokens, self.pad) word_del_out, _ = self.decoder.forward_word_del( normalize=False, prev_output_tokens=word_predictions, encoder_out=encoder_out) word_del_masks = word_predictions.ne(self.pad) return { "mask_ins": { "out": mask_ins_out, "tgt": mask_ins_targets, "mask": mask_ins_masks, "ls": 0.01, }, "word_ins": { "out": word_ins_out, "tgt": tgt_tokens, "mask": masked_tgt_masks, "ls": self.args.label_smoothing, "nll_loss": True }, "word_del": { "out": word_del_out, "tgt": word_del_targets, "mask": word_del_masks } } def forward_decoder( self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs ): output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores attn = decoder_out.attn history = decoder_out.history bsz = output_tokens.size(0) if max_ratio is None: max_lens = torch.zeros_like(output_tokens).fill_(255) else: if encoder_out.encoder_padding_mask is None: max_src_len = encoder_out.encoder_out.size(0) src_lens = encoder_out.encoder_out.new(bsz).fill_(max_src_len) else: src_lens = (~encoder_out.encoder_padding_mask).sum(1) max_lens = (src_lens * max_ratio).clamp(min=10).long() # delete words # do not delete tokens if it is <s> </s> can_del_word = output_tokens.ne(self.pad).sum(1) > 2 if can_del_word.sum() != 0: # we cannot delete, skip word_del_score, word_del_attn = self.decoder.forward_word_del( normalize=True, prev_output_tokens=_skip(output_tokens, can_del_word), encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_del_word) ) word_del_pred = word_del_score.max(-1)[1].bool() _tokens, _scores, _attn = _apply_del_words( output_tokens[can_del_word], output_scores[can_del_word], word_del_attn, word_del_pred, self.pad, self.bos, self.eos, ) output_tokens = _fill(output_tokens, can_del_word, _tokens, self.pad) output_scores = _fill(output_scores, can_del_word, _scores, 0) attn = _fill(attn, can_del_word, _attn, 0.) 
if history is not None: history.append(output_tokens.clone()) # insert placeholders can_ins_mask = output_tokens.ne(self.pad).sum(1) < max_lens if can_ins_mask.sum() != 0: mask_ins_score, _ = self.decoder.forward_mask_ins( normalize=True, prev_output_tokens=_skip(output_tokens, can_ins_mask), encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_ins_mask) ) if eos_penalty > 0.0: mask_ins_score[:, :, 0] = mask_ins_score[:, :, 0] - eos_penalty mask_ins_pred = mask_ins_score.max(-1)[1] mask_ins_pred = torch.min( mask_ins_pred, max_lens[can_ins_mask, None].expand_as(mask_ins_pred) ) _tokens, _scores = _apply_ins_masks( output_tokens[can_ins_mask], output_scores[can_ins_mask], mask_ins_pred, self.pad, self.unk, self.eos, ) output_tokens = _fill(output_tokens, can_ins_mask, _tokens, self.pad) output_scores = _fill(output_scores, can_ins_mask, _scores, 0) if history is not None: history.append(output_tokens.clone()) # insert words can_ins_word = output_tokens.eq(self.unk).sum(1) > 0 if can_ins_word.sum() != 0: word_ins_score, word_ins_attn = self.decoder.forward_word_ins( normalize=True, prev_output_tokens=_skip(output_tokens, can_ins_word), encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_ins_word) ) word_ins_score, word_ins_pred = word_ins_score.max(-1) _tokens, _scores = _apply_ins_words( output_tokens[can_ins_word], output_scores[can_ins_word], word_ins_pred, word_ins_score, self.unk, ) output_tokens = _fill(output_tokens, can_ins_word, _tokens, self.pad) output_scores = _fill(output_scores, can_ins_word, _scores, 0) attn = _fill(attn, can_ins_word, word_ins_attn, 0.) if history is not None: history.append(output_tokens.clone()) # delete some unnecessary paddings cut_off = output_tokens.ne(self.pad).sum(1).max() output_tokens = output_tokens[:, :cut_off] output_scores = output_scores[:, :cut_off] attn = None if attn is None else attn[:, :cut_off, :] return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=attn, history=history ) def initialize_output_tokens(self, encoder_out, src_tokens): initial_output_tokens = src_tokens.new_zeros(src_tokens.size(0), 2) initial_output_tokens[:, 0] = self.bos initial_output_tokens[:, 1] = self.eos initial_output_scores = initial_output_tokens.new_zeros( *initial_output_tokens.size() ).type_as(encoder_out.encoder_out) return DecoderOut( output_tokens=initial_output_tokens, output_scores=initial_output_scores, attn=None, step=0, max_step=0, history=None ) class LevenshteinTransformerDecoder(FairseqNATDecoder): def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__( args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn ) self.dictionary = dictionary self.bos = dictionary.bos() self.unk = dictionary.unk() self.eos = dictionary.eos() self.sampling_for_deletion = getattr(args, "sampling_for_deletion", False) self.embed_mask_ins = Embedding(256, self.output_embed_dim * 2, None) self.embed_word_del = Embedding(2, self.output_embed_dim, None) # del_word, ins_mask, ins_word self.early_exit = [int(i) for i in args.early_exit.split(',')] assert len(self.early_exit) == 3 # copy layers for mask-predict/deletion self.layers_msk = None if getattr(args, "no_share_maskpredictor", False): self.layers_msk = nn.ModuleList([ TransformerDecoderLayer(args, no_encoder_attn) for _ in range(self.early_exit[1]) ]) self.layers_del = None if getattr(args, "no_share_discriminator", False): self.layers_del = nn.ModuleList([ TransformerDecoderLayer(args, no_encoder_attn) for _ in 
range(self.early_exit[0]) ]) if getattr(args, "share_discriminator_maskpredictor", False): assert getattr(args, "no_share_discriminator", False), "must set saperate discriminator" self.layers_msk = self.layers_del def extract_features( self, prev_output_tokens, encoder_out=None, early_exit=None, layers=None, **unused ): """ Similar to *forward* but only return features. Inputs: prev_output_tokens: Tensor(B, T) encoder_out: a dictionary of hidden states and masks Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs the LevenshteinTransformer decoder has full-attention to all generated tokens """ # embed positions positions = ( self.embed_positions(prev_output_tokens) if self.embed_positions is not None else None ) # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers decoder_padding_mask = prev_output_tokens.eq(self.padding_idx) layers = self.layers if layers is None else layers early_exit = len(layers) if early_exit is None else early_exit for _, layer in enumerate(layers[: early_exit]): x, attn, _ = layer( x, encoder_out.encoder_out if encoder_out is not None else None, encoder_out.encoder_padding_mask if encoder_out is not None else None, self_attn_mask=None, self_attn_padding_mask=decoder_padding_mask, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {"attn": attn, "inner_states": inner_states} @ensemble_decoder def forward_mask_ins(self, normalize, encoder_out, prev_output_tokens, **unused): features, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, early_exit=self.early_exit[1], layers=self.layers_msk, **unused ) features_cat = torch.cat([features[:, :-1, :], features[:, 1:, :]], 2) decoder_out = F.linear(features_cat, self.embed_mask_ins.weight) if normalize: return F.log_softmax(decoder_out, -1), extra['attn'] return decoder_out, extra['attn'] @ensemble_decoder def forward_word_ins(self, normalize, encoder_out, prev_output_tokens, **unused): features, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, early_exit=self.early_exit[2], layers=self.layers, **unused ) decoder_out = self.output_layer(features) if normalize: return F.log_softmax(decoder_out, -1), extra['attn'] return decoder_out, extra['attn'] @ensemble_decoder def forward_word_del(self, normalize, encoder_out, prev_output_tokens, **unused): features, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, early_exit=self.early_exit[0], layers=self.layers_del, **unused ) decoder_out = F.linear(features, self.embed_word_del.weight) if normalize: return F.log_softmax(decoder_out, -1), extra['attn'] return decoder_out, extra['attn'] @register_model_architecture("levenshtein_transformer", "levenshtein_transformer") def levenshtein_base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) 
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.sampling_for_deletion = getattr(args, "sampling_for_deletion", False) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.early_exit = getattr(args, "early_exit", "6,6,6") args.no_share_discriminator = getattr(args, "no_share_discriminator", False) args.no_share_maskpredictor = getattr(args, "no_share_maskpredictor", False) args.share_discriminator_maskpredictor = getattr(args, "share_discriminator_maskpredictor", False) args.no_share_last_layer = getattr(args, "no_share_last_layer", False) @register_model_architecture( "levenshtein_transformer", "levenshtein_transformer_wmt_en_de" ) def levenshtein_transformer_wmt_en_de(args): levenshtein_base_architecture(args) # similar parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) @register_model_architecture( "levenshtein_transformer", "levenshtein_transformer_vaswani_wmt_en_de_big" ) def levenshtein_transformer_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.3) levenshtein_base_architecture(args) # default parameters used in tensor2tensor implementation @register_model_architecture( "levenshtein_transformer", "levenshtein_transformer_wmt_en_de_big" ) def levenshtein_transformer_wmt_en_de_big_t2t(args): args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) args.attention_dropout = getattr(args, "attention_dropout", 0.1) 
args.activation_dropout = getattr(args, "activation_dropout", 0.1) levenshtein_transformer_vaswani_wmt_en_de_big(args)
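if __name__ == "__main__":
    # Shape sketch (illustrative) for the placeholder-insertion head: between T
    # tokens there are T-1 insertion slots, each represented by concatenating
    # the features of its left and right neighbour, exactly as forward_mask_ins
    # does before projecting onto the 256-way embed_mask_ins output.
    B, T, C = 2, 5, 8
    feats = torch.randn(B, T, C)
    slots = torch.cat([feats[:, :-1, :], feats[:, 1:, :]], 2)
    print(slots.shape)  # torch.Size([2, 4, 16])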
19,562
39.841336
120
py
RegularizedBN
RegularizedBN-main/fairseq/models/nat/fairseq_nat_model.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch from fairseq.models.transformer import TransformerModel, TransformerEncoder, TransformerDecoder from fairseq.modules.transformer_sentence_encoder import init_bert_params def ensemble_encoder(func): def wrapper(self, *args, **kwargs): if self.ensemble_models is None or len(self.ensemble_models) == 1: return func(self, *args, **kwargs) encoder_outs = [func(model, *args, **kwargs) for model in self.ensemble_models] _encoder_out = encoder_outs[0] def stack(key): outs = [getattr(e, key) for e in encoder_outs] return torch.stack(outs, -1) if outs[0] is not None else None return _encoder_out._replace( encoder_out=stack('encoder_out'), encoder_embedding=stack('encoder_embedding'), encoder_states=stack('encoder_states') ) return wrapper def ensemble_decoder(func): def wrapper(self, normalize=False, encoder_out=None, *args, **kwargs): if self.ensemble_models is None or len(self.ensemble_models) == 1: return func(self, normalize=normalize, encoder_out=encoder_out, *args, **kwargs) action_outs = [ func(model, normalize=normalize, encoder_out=encoder_out._replace( encoder_out=encoder_out.encoder_out[:, :, :, i] ), *args, **kwargs) for i, model in enumerate(self.ensemble_models) ] if not isinstance(action_outs[0], tuple): # return multiple values action_outs = [[a] for a in action_outs] else: action_outs = [list(a) for a in action_outs] ensembled_outs = [] for i in range(len(action_outs[0])): if i == 0 and normalize: ensembled_outs += [ torch.logsumexp( torch.stack([a[i] for a in action_outs], -1), dim=-1) - math.log(len(self.ensemble_models)) ] elif action_outs[0][i] is not None: ensembled_outs += [ torch.stack([a[i] for a in action_outs], -1) ] else: ensembled_outs += [None] if len(ensembled_outs) == 1: return ensembled_outs[0] return tuple(ensembled_outs) return wrapper class FairseqNATModel(TransformerModel): """ Abstract class for all nonautoregressive-based models """ def __init__(self, args, encoder, decoder): super().__init__(args, encoder, decoder) self.tgt_dict = decoder.dictionary self.bos = decoder.dictionary.bos() self.eos = decoder.dictionary.eos() self.pad = decoder.dictionary.pad() self.unk = decoder.dictionary.unk() self.ensemble_models = None @property def allow_length_beam(self): return False @property def allow_ensemble(self): return True def enable_ensemble(self, models): self.encoder.ensemble_models = [m.encoder for m in models] self.decoder.ensemble_models = [m.decoder for m in models] @staticmethod def add_args(parser): TransformerModel.add_args(parser) parser.add_argument( "--apply-bert-init", action="store_true", help="use custom param initialization for BERT", ) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): decoder = FairseqNATDecoder(args, tgt_dict, embed_tokens) if getattr(args, "apply_bert_init", False): decoder.apply(init_bert_params) return decoder @classmethod def build_encoder(cls, args, src_dict, embed_tokens): encoder = FairseqNATEncoder(args, src_dict, embed_tokens) if getattr(args, "apply_bert_init", False): encoder.apply(init_bert_params) return encoder def forward_encoder(self, encoder_inputs): return self.encoder(*encoder_inputs) def forward_decoder(self, *args, **kwargs): return NotImplementedError def initialize_output_tokens(self, *args, **kwargs): return NotImplementedError def forward(self, *args, **kwargs): return NotImplementedError 
class FairseqNATEncoder(TransformerEncoder): def __init__(self, args, dictionary, embed_tokens): super().__init__(args, dictionary, embed_tokens) self.ensemble_models = None @ensemble_encoder def forward(self, *args, **kwargs): return super().forward(*args, **kwargs) class FairseqNATDecoder(TransformerDecoder): def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(args, dictionary, embed_tokens, no_encoder_attn) self.ensemble_models = None
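if __name__ == "__main__":
    # Illustrative check of the log-space averaging used by ensemble_decoder
    # above: logsumexp over the stacked per-model log-probs minus log(N) is
    # exactly the log of the mean probability.
    logps = torch.log_softmax(torch.randn(3, 7), dim=-1)  # 3 "models", 7 classes
    lse_avg = torch.logsumexp(logps, dim=0) - math.log(3)
    direct_avg = torch.log(logps.exp().mean(dim=0))
    print(torch.allclose(lse_avg, direct_avg, atol=1e-6))  # True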
4,959
32.972603
95
py
RegularizedBN
RegularizedBN-main/fairseq/models/nat/nonautoregressive_ensembles.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq.models.nat import ( _fill, _skip, _skip_encoder_out, _apply_ins_masks, _apply_ins_words, _apply_del_words, ) class _EnsembleModelEncoder(object): def __init__(self, models): self.models = models def reorder_encoder_out(self, encoder_outs, new_order): encoder_outs = [ model.encoder.reorder_encoder_out(encoder_out, new_order) for model, encoder_out in zip(self.models, encoder_outs) ] return encoder_outs class BasicEnsembleModel(torch.nn.Module): """A wrapper around an ensemble of models.""" def __init__(self, models): super().__init__() self.models = torch.nn.ModuleList(models) self.bos = self.models[0].decoder.dictionary.bos() self.eos = self.models[0].decoder.dictionary.eos() self.pad = self.models[0].decoder.dictionary.pad() self.unk = self.models[0].decoder.dictionary.unk() self.encoder = _EnsembleModelEncoder(self.models) def has_encoder(self): return hasattr(self.models[0], 'encoder') def max_decoder_positions(self): return min(m.max_decoder_positions() for m in self.models) @torch.no_grad() def forward_encoder(self, encoder_input): if not self.has_encoder(): return None return [model.forward_encoder(encoder_input) for model in self.models] @torch.no_grad() def forward_decoder(self, *inputs): raise NotImplementedError def initialize_output_tokens(self, *inputs): raise NotImplementedError class EnsembleLevT(BasicEnsembleModel): """A wrapper around an ensemble of models.""" def __init__(self, models): super().__init__(models) @torch.no_grad() def forward_decoder(self, decoder_out, encoder_outs, eos_penalty=0.0, max_ratio=None, **kwargs): # LevT ensembling # A pipeline of three steps: deletion, placeholder, and word insertion. # We need to average scores in each step in a pipeline way because of dependence. 
# deletion output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores attn = decoder_out.attn bsz = output_tokens.size(0) if max_ratio is None: max_lens = output_tokens.new().fill_(255) else: if encoder_outs[0].encoder_padding_mask is None: src_lens = encoder_outs[0].encoder_out.new(bsz).fill_(encoder_outs[0].encoder_out.size(1)) else: src_lens = (~encoder_outs[0].encoder_padding_mask).sum(1) max_lens = (src_lens * max_ratio).clamp(min=10).long() # delete words # do not delete tokens if it is <s> </s> can_del_word = output_tokens.ne(self.pad).sum(1) > 2 if can_del_word.sum() != 0: # we cannot delete, skip output_tokens, output_scores, attn = self.forward_word_del( encoder_outs, output_tokens, output_scores, attn, can_del_word, ) # insert placeholders can_ins_mask = output_tokens.ne(self.pad).sum(1) < max_lens if can_ins_mask.sum() != 0: output_tokens, output_scores = self.forward_mask_ins( encoder_outs, output_tokens, output_scores, can_ins_mask, eos_penalty, max_lens, ) # insert words can_ins_word = output_tokens.eq(self.unk).sum(1) > 0 if can_ins_word.sum() != 0: output_tokens, output_scores, attn = self.forward_word_ins( encoder_outs, output_tokens, output_scores, attn, can_ins_word, ) # delete some unnecessary paddings cut_off = output_tokens.ne(self.pad).sum(1).max() output_tokens = output_tokens[:, :cut_off] output_scores = output_scores[:, :cut_off] attn = None if attn is None else attn[:, :cut_off, :] return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=attn, history=None ) def forward_word_del(self, encoder_outs, output_tokens, output_scores, attn, can_del_word): word_del_score_avg = [] word_del_attn_avg = [] for model, encoder_out in zip(self.models, encoder_outs): word_del_out, word_del_attn = model.decoder.forward_word_del( _skip(output_tokens, can_del_word), _skip_encoder_out(model.encoder, encoder_out, can_del_word), ) word_del_score = F.log_softmax(word_del_out, 2) word_del_score_avg.append(word_del_score) word_del_attn_avg.append(word_del_attn) word_del_score_avg = torch.logsumexp(torch.stack(word_del_score_avg, dim=0), dim=0) - math.log(len(self.models)) word_del_pred = word_del_score_avg.max(-1)[1].bool() if word_del_attn_avg[0] is not None: word_del_attn_avg = torch.stack(word_del_attn_avg, dim=0)/len(self.models) else: word_del_attn_avg = None _tokens, _scores, _attn = _apply_del_words( output_tokens[can_del_word], output_scores[can_del_word], word_del_attn_avg, word_del_pred, self.pad, self.bos, self.eos, ) output_tokens = _fill(output_tokens, can_del_word, _tokens, self.pad) output_scores = _fill(output_scores, can_del_word, _scores, 0) attn = _fill(attn, can_del_word, _attn, 0.) 
return output_tokens, output_scores, attn def forward_mask_ins(self, encoder_outs, output_tokens, output_scores, can_ins_mask, eos_penalty, max_lens): mask_ins_score_avg = [] for model, encoder_out in zip(self.models, encoder_outs): mask_ins_out, _ = model.decoder.forward_mask_ins( _skip(output_tokens, can_ins_mask), _skip_encoder_out(model.encoder, encoder_out, can_ins_mask), ) mask_ins_score = F.log_softmax(mask_ins_out, 2) if eos_penalty > 0.0: mask_ins_score[:, :, 0] -= eos_penalty mask_ins_score_avg.append(mask_ins_score) mask_ins_score_avg = torch.logsumexp(torch.stack(mask_ins_score_avg, dim=0), dim=0) - math.log(len(self.models)) mask_ins_pred = mask_ins_score_avg.max(-1)[1] mask_ins_pred = torch.min( mask_ins_pred, max_lens[can_ins_mask, None].expand_as(mask_ins_pred) ) _tokens, _scores = _apply_ins_masks( output_tokens[can_ins_mask], output_scores[can_ins_mask], mask_ins_pred, self.pad, self.unk, self.eos, ) output_tokens = _fill(output_tokens, can_ins_mask, _tokens, self.pad) output_scores = _fill(output_scores, can_ins_mask, _scores, 0) return output_tokens, output_scores def forward_word_ins(self, encoder_outs, output_tokens, output_scores, attn, can_ins_word): word_ins_score_avg = [] word_ins_attn_avg = [] for model, encoder_out in zip(self.models, encoder_outs): word_ins_out, word_ins_attn = model.decoder.forward_word_ins( _skip(output_tokens, can_ins_word), _skip_encoder_out(model.encoder, encoder_out, can_ins_word), ) word_ins_score = F.log_softmax(word_ins_out, 2) word_ins_score_avg.append(word_ins_score) word_ins_attn_avg.append(word_ins_attn) word_ins_score_avg = torch.logsumexp(torch.stack(word_ins_score_avg, dim=0), dim=0) - math.log(len(self.models)) if word_ins_attn_avg[0] is not None: word_ins_attn_avg = torch.stack(word_ins_attn_avg, dim=0)/len(self.models) else: word_ins_attn_avg = None word_ins_score_max, word_ins_pred = word_ins_score_avg.max(-1) _tokens, _scores = _apply_ins_words( output_tokens[can_ins_word], output_scores[can_ins_word], word_ins_pred, word_ins_score_max, self.unk, ) output_tokens = _fill(output_tokens, can_ins_word, _tokens, self.pad) output_scores = _fill(output_scores, can_ins_word, _scores, 0) attn = _fill(attn, can_ins_word, word_ins_attn, 0.) return output_tokens, output_scores, attn def initialize_output_tokens(self, encoder_outs, src_tokens): # LevT doesn't do length prediction. return self.models[0].initialize_output_tokens(encoder_outs[0], src_tokens)
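if __name__ == "__main__":
    # Tiny illustration (not part of the original module) of the _fill helper
    # imported above: per-row results computed on a subset of the batch are
    # written back into the full batch, right-padding when the updated rows
    # grew longer than the originals. pad=1 is an arbitrary demo value.
    x = torch.full((3, 4), 1, dtype=torch.long)
    mask = torch.tensor([True, False, True])
    y = torch.arange(2 * 6).view(2, 6)
    print(_fill(x, mask, y, padding_idx=1).shape)  # torch.Size([3, 6])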
9,020
37.883621
120
py
RegularizedBN
RegularizedBN-main/fairseq/models/nat/insertion_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch import torch.nn.functional as F from fairseq.models import register_model, register_model_architecture from fairseq.models.nat import ( LevenshteinTransformerDecoder, LevenshteinTransformerModel, FairseqNATModel, ensemble_decoder ) from fairseq.models.transformer import Linear from fairseq.utils import new_arange from fairseq.modules.transformer_sentence_encoder import init_bert_params class NegativeDistanceScore(object): def __init__(self): # pre-compute some values self.scores = {} self.scores[0.5] = self.compute_score_full(50, 0.5) self.scores[1.0] = self.compute_score_full(50, 1.0) self.scores[2.0] = self.compute_score_full(50, 2.0) def __call__(self, i, L, tau): if (tau is None) or (tau > 1000): return 1 / L if tau in self.scores: if L < self.scores[tau].shape[0]: return self.scores[tau][L - 1, i] return self.compute_score(L, tau)[i] def compute_score(self, L, tau): s = np.array([-abs(L / 2 - i) / tau for i in range(L)]) s = np.exp(s - s.max()) return s / s.sum() def compute_score_full(self, L, tau): s = -abs(np.arange(0, L - 1)[:, None] / 2 - np.arange(L)[None, :]) / tau s = np.tril(s, 0) + np.triu(s - float("inf"), 1) s = np.exp(s - s.max(1, keepdims=True)) return s / s.sum(1, keepdims=True) neg_scorer = NegativeDistanceScore() def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx, vocab_size, tau=None): try: from fairseq import libnat except ImportError as e: import sys sys.stderr.write('ERROR: missing libnat. run `pip install --editable .`\n') raise e B = in_tokens.size(0) T = in_tokens.size(1) V = vocab_size with torch.cuda.device_of(in_tokens): in_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist()) ] out_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(out_tokens.tolist()) ] full_labels = libnat.suggested_ed2_path( in_tokens_list, out_tokens_list, padding_idx ) insert_labels = [a[:-1] for a in full_labels] # numericalize1 insert_label_tensors = in_tokens.new_zeros(B * (T - 1) * V).float() insert_index, insert_labels = zip( *[ (w + (j + i * (T - 1)) * V, neg_scorer(k, len(label), tau)) for i, labels in enumerate(insert_labels) for j, label in enumerate(labels[1:-1]) for k, w in enumerate(label) ] ) # HACK 1:-1 insert_index, insert_labels = [ torch.tensor(list(a), device=in_tokens.device) for a in [insert_index, insert_labels] ] insert_label_tensors.scatter_(0, insert_index.long(), insert_labels) insert_label_tensors = insert_label_tensors.view(B, T - 1, V) return insert_label_tensors def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_scores, padding_idx): padding_masks = in_tokens[:, 1:].eq(padding_idx) word_ins_scores.masked_fill_(padding_masks, 0.0) word_ins_pred.masked_fill_(padding_masks, padding_idx) in_coords = new_arange(in_tokens).type_as(in_scores) # shift all padding predictions to infinite out_coords = (in_coords[:, 1:] - 0.5).masked_fill( word_ins_pred.eq(padding_idx), float("inf") ) out_coords = torch.cat([in_coords, out_coords], 1).sort(-1)[1] out_tokens = torch.cat([in_tokens, word_ins_pred], 1).gather(1, out_coords) out_scores = torch.cat([in_scores, word_ins_scores], 1).gather(1, out_coords) return out_tokens, out_scores @register_model("insertion_transformer") class InsertionTransformerModel(LevenshteinTransformerModel): def __init__(self, args, encoder, decoder): 
super().__init__(args, encoder, decoder) @staticmethod def add_args(parser): FairseqNATModel.add_args(parser) parser.add_argument("--label-tau", default=None, type=float) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): decoder = InsertionTransformerDecoder(args, tgt_dict, embed_tokens) if getattr(args, "apply_bert_init", False): decoder.apply(init_bert_params) return decoder def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): assert tgt_tokens is not None, "forward function only supports training." # encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) # generate training labels for insertion word_ins_out = self.decoder.forward_word_ins( normalize=False, prev_output_tokens=prev_output_tokens, encoder_out=encoder_out ) word_ins_tgt = _get_ins_targets( prev_output_tokens, tgt_tokens, self.pad, self.unk, len(self.tgt_dict), tau=self.decoder.label_tau, ).type_as(word_ins_out) word_ins_masks = prev_output_tokens[:, 1:].ne(self.pad) return { "word_ins": { "out": word_ins_out, "tgt": word_ins_tgt, "mask": word_ins_masks, "ls": self.args.label_smoothing, "nll_loss": True } } def forward_decoder( self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs ): output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores history = decoder_out.history # TODO: decoding for InsertionTransformer word_ins_score = self.decoder.forward_word_ins( normalize=True, prev_output_tokens=output_tokens, encoder_out=encoder_out ) if eos_penalty > 0.0: word_ins_score[:, :, self.pad] -= eos_penalty word_ins_score, word_ins_pred = word_ins_score.max(-1) output_tokens, output_scores = _apply_ins_words( output_tokens, output_scores, word_ins_pred, word_ins_score, self.pad ) # delete some unnecessary paddings cut_off = output_tokens.ne(self.pad).sum(1).max() output_tokens = output_tokens[:, :cut_off] output_scores = output_scores[:, :cut_off] if history is not None: history.append(output_tokens.clone()) return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=None, history=history ) class InsertionTransformerDecoder(LevenshteinTransformerDecoder): def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): # use the TransformerDecoder's __init__ super(LevenshteinTransformerDecoder, self).__init__( args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn ) self.dictionary = dictionary self.bos = dictionary.bos() self.unk = dictionary.unk() self.eos = dictionary.eos() self.pool_out = Linear(self.output_embed_dim * 2, self.output_embed_dim) self.label_tau = getattr(args, "label_tau", None) @ensemble_decoder def forward_word_ins(self, normalize, encoder_out, prev_output_tokens): features = self.extract_features(prev_output_tokens, encoder_out=encoder_out)[0] features = self.pool_out( torch.cat([features[:, :-1, :], features[:, 1:, :]], 2) ) decoder_out = self.output_layer(features) return F.log_softmax(decoder_out, -1) if normalize else decoder_out def forward_mask_ins(self, *args, **kwargs): raise NotImplementedError def forward_word_del(self, *args, **kwargs): raise NotImplementedError @register_model_architecture("insertion_transformer", "insertion_transformer") def insertion_base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, 
"encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) # special for insertion transformer args.label_tau = getattr(args, "label_tau", None)
10,448
36.185053
88
py
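A minimal, self-contained sketch (numpy only; an illustration rather than repository code) of how the NegativeDistanceScore above soft-labels the candidate insertion slots of a span: slots near the centre of the span receive the most weight, larger tau flattens the weighting, and tau=None falls back to the uniform 1/L used in __call__.

import numpy as np

def slot_distribution(L, tau):
    # score slot i in a span of length L by its distance from the centre, then softmax
    s = np.array([-abs(L / 2 - i) / tau for i in range(L)])
    s = np.exp(s - s.max())      # subtract the max for numerical stability
    return s / s.sum()

if __name__ == "__main__":
    for tau in (0.5, 1.0, 2.0):
        print(tau, np.round(slot_distribution(5, tau), 3))
    # larger tau -> flatter weighting; as tau grows every slot approaches 1/L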
RegularizedBN
RegularizedBN-main/fairseq/models/nat/nonautoregressive_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn.functional as F from fairseq import utils from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import Embedding from fairseq.models.nat import ( FairseqNATModel, FairseqNATDecoder, ensemble_decoder ) from fairseq.modules.transformer_sentence_encoder import init_bert_params def _mean_pooling(enc_feats, src_masks): # enc_feats: T x B x C # src_masks: B x T or None if src_masks is None: enc_feats = enc_feats.mean(0) else: src_masks = (~src_masks).transpose(0, 1).type_as(enc_feats) enc_feats = ( (enc_feats / src_masks.sum(0)[None, :, None]) * src_masks[:, :, None] ).sum(0) return enc_feats def _argmax(x, dim): return (x == x.max(dim, keepdim=True)[0]).type_as(x) def _uniform_assignment(src_lens, trg_lens): max_trg_len = trg_lens.max() steps = (src_lens.float() - 1) / (trg_lens.float() - 1) # step-size # max_trg_len index_t = utils.new_arange(trg_lens, max_trg_len).float() index_t = steps[:, None] * index_t[None, :] # batch_size X max_trg_len index_t = torch.round(index_t).long().detach() return index_t @register_model("nonautoregressive_transformer") class NATransformerModel(FairseqNATModel): @property def allow_length_beam(self): return True @staticmethod def add_args(parser): FairseqNATModel.add_args(parser) # length prediction parser.add_argument("--src-embedding-copy", action="store_true", help="copy encoder word embeddings as the initial input of the decoder") parser.add_argument("--pred-length-offset", action="store_true", help="predicting the length difference between the target and source sentences") parser.add_argument("--sg-length-pred", action="store_true", help="stop the gradients back-propagated from the length predictor") parser.add_argument("--length-loss-factor", type=float, help="weights on the length prediction loss") @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): decoder = NATransformerDecoder(args, tgt_dict, embed_tokens) if getattr(args, "apply_bert_init", False): decoder.apply(init_bert_params) return decoder def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): # encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) # length prediction length_out = self.decoder.forward_length(normalize=False, encoder_out=encoder_out) length_tgt = self.decoder.forward_length_prediction(length_out, encoder_out, tgt_tokens) # decoding word_ins_out = self.decoder( normalize=False, prev_output_tokens=prev_output_tokens, encoder_out=encoder_out) return { "word_ins": { "out": word_ins_out, "tgt": tgt_tokens, "mask": tgt_tokens.ne(self.pad), "ls": self.args.label_smoothing, "nll_loss": True }, "length": { "out": length_out, "tgt": length_tgt, "factor": self.decoder.length_loss_factor } } def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs): step = decoder_out.step output_tokens = decoder_out.output_tokens output_scores = decoder_out.output_scores history = decoder_out.history # execute the decoder output_masks = output_tokens.ne(self.pad) _scores, _tokens = self.decoder( normalize=True, prev_output_tokens=output_tokens, encoder_out=encoder_out, step=step, ).max(-1) output_tokens.masked_scatter_(output_masks, _tokens[output_masks]) 
output_scores.masked_scatter_(output_masks, _scores[output_masks]) if history is not None: history.append(output_tokens.clone()) return decoder_out._replace( output_tokens=output_tokens, output_scores=output_scores, attn=None, history=history ) def initialize_output_tokens(self, encoder_out, src_tokens): # length prediction length_tgt = self.decoder.forward_length_prediction( self.decoder.forward_length(normalize=True, encoder_out=encoder_out), encoder_out=encoder_out ) max_length = length_tgt.clamp_(min=2).max() idx_length = utils.new_arange(src_tokens, max_length) initial_output_tokens = src_tokens.new_zeros( src_tokens.size(0), max_length ).fill_(self.pad) initial_output_tokens.masked_fill_( idx_length[None, :] < length_tgt[:, None], self.unk ) initial_output_tokens[:, 0] = self.bos initial_output_tokens.scatter_(1, length_tgt[:, None] - 1, self.eos) initial_output_scores = initial_output_tokens.new_zeros( *initial_output_tokens.size() ).type_as(encoder_out.encoder_out) return DecoderOut( output_tokens=initial_output_tokens, output_scores=initial_output_scores, attn=None, step=0, max_step=0, history=None ) def regenerate_length_beam(self, decoder_out, beam_size): output_tokens = decoder_out.output_tokens length_tgt = output_tokens.ne(self.pad).sum(1) length_tgt = length_tgt[:, None] + utils.new_arange(length_tgt, 1, beam_size) - beam_size // 2 length_tgt = length_tgt.view(-1).clamp_(min=2) max_length = length_tgt.max() idx_length = utils.new_arange(length_tgt, max_length) initial_output_tokens = output_tokens.new_zeros( length_tgt.size(0), max_length ).fill_(self.pad) initial_output_tokens.masked_fill_( idx_length[None, :] < length_tgt[:, None], self.unk ) initial_output_tokens[:, 0] = self.bos initial_output_tokens.scatter_(1, length_tgt[:, None] - 1, self.eos) initial_output_scores = initial_output_tokens.new_zeros( *initial_output_tokens.size() ).type_as(decoder_out.output_scores) return decoder_out._replace( output_tokens=initial_output_tokens, output_scores=initial_output_scores ) class NATransformerDecoder(FairseqNATDecoder): def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__( args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn ) self.dictionary = dictionary self.bos = dictionary.bos() self.unk = dictionary.unk() self.eos = dictionary.eos() self.encoder_embed_dim = args.encoder_embed_dim self.sg_length_pred = getattr(args, "sg_length_pred", False) self.pred_length_offset = getattr(args, "pred_length_offset", False) self.length_loss_factor = getattr(args, "length_loss_factor", 0.1) self.src_embedding_copy = getattr(args, "src_embedding_copy", False) self.embed_length = Embedding(256, self.encoder_embed_dim, None) @ensemble_decoder def forward(self, normalize, encoder_out, prev_output_tokens, step=0, **unused): features, _ = self.extract_features( prev_output_tokens, encoder_out=encoder_out, embedding_copy=(step == 0) & self.src_embedding_copy, ) decoder_out = self.output_layer(features) return F.log_softmax(decoder_out, -1) if normalize else decoder_out @ensemble_decoder def forward_length(self, normalize, encoder_out): enc_feats = encoder_out.encoder_out # T x B x C src_masks = encoder_out.encoder_padding_mask # B x T or None enc_feats = _mean_pooling(enc_feats, src_masks) if self.sg_length_pred: enc_feats = enc_feats.detach() length_out = F.linear(enc_feats, self.embed_length.weight) return F.log_softmax(length_out, -1) if normalize else length_out def extract_features( self, prev_output_tokens, encoder_out=None, 
early_exit=None, embedding_copy=False, **unused ): """ Similar to *forward* but only return features. Inputs: prev_output_tokens: Tensor(B, T) encoder_out: a dictionary of hidden states and masks Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs the LevenshteinTransformer decoder has full-attention to all generated tokens """ # embedding if embedding_copy: src_embd = encoder_out.encoder_embedding src_mask = encoder_out.encoder_padding_mask src_mask = ( ~src_mask if src_mask is not None else prev_output_tokens.new_ones(*src_embd.size()[:2]).bool() ) x, decoder_padding_mask = self.forward_embedding( prev_output_tokens, self.forward_copying_source( src_embd, src_mask, prev_output_tokens.ne(self.padding_idx) ), ) else: x, decoder_padding_mask = self.forward_embedding(prev_output_tokens) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for i, layer in enumerate(self.layers): # early exit from the decoder. if (early_exit is not None) and (i >= early_exit): break x, attn, _ = layer( x, encoder_out.encoder_out if encoder_out is not None else None, encoder_out.encoder_padding_mask if encoder_out is not None else None, self_attn_mask=None, self_attn_padding_mask=decoder_padding_mask, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {"attn": attn, "inner_states": inner_states} def forward_embedding(self, prev_output_tokens, states=None): # embed positions positions = ( self.embed_positions(prev_output_tokens) if self.embed_positions is not None else None ) # embed tokens and positions if states is None: x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) else: x = states if positions is not None: x += positions x = self.dropout_module(x) decoder_padding_mask = prev_output_tokens.eq(self.padding_idx) return x, decoder_padding_mask def forward_copying_source(self, src_embeds, src_masks, tgt_masks): length_sources = src_masks.sum(1) length_targets = tgt_masks.sum(1) mapped_inputs = _uniform_assignment(length_sources, length_targets).masked_fill( ~tgt_masks, 0 ) copied_embedding = torch.gather( src_embeds, 1, mapped_inputs.unsqueeze(-1).expand( *mapped_inputs.size(), src_embeds.size(-1) ), ) return copied_embedding def forward_length_prediction(self, length_out, encoder_out, tgt_tokens=None): enc_feats = encoder_out.encoder_out # T x B x C src_masks = encoder_out.encoder_padding_mask # B x T or None if self.pred_length_offset: if src_masks is None: src_lengs = enc_feats.new_ones(enc_feats.size(1)).fill_( enc_feats.size(0) ) else: src_lengs = (~src_masks).transpose(0, 1).type_as(enc_feats).sum(0) src_lengs = src_lengs.long() if tgt_tokens is not None: # obtain the length target tgt_lengs = tgt_tokens.ne(self.padding_idx).sum(1).long() if self.pred_length_offset: length_tgt = tgt_lengs - src_lengs + 128 else: length_tgt = tgt_lengs length_tgt = length_tgt.clamp(min=0, max=255) else: # predict the length target (greedy for now) # TODO: implementing length-beam pred_lengs = length_out.max(-1)[1] if self.pred_length_offset: length_tgt = pred_lengs - 128 + src_lengs else: length_tgt = pred_lengs return length_tgt @register_model_architecture( "nonautoregressive_transformer", "nonautoregressive_transformer" ) def base_architecture(args): args.encoder_embed_path = getattr(args, 
"encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) # --- special arguments --- args.sg_length_pred = getattr(args, "sg_length_pred", False) args.pred_length_offset = getattr(args, "pred_length_offset", False) args.length_loss_factor = getattr(args, "length_loss_factor", 0.1) args.src_embedding_copy = getattr(args, "src_embedding_copy", False) @register_model_architecture( "nonautoregressive_transformer", "nonautoregressive_transformer_wmt_en_de" ) def nonautoregressive_transformer_wmt_en_de(args): base_architecture(args)
16,114
36.917647
108
py
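A small standalone sketch (plain PyTorch with toy lengths; not repository code) of the uniform copy performed by _uniform_assignment above: each target position is mapped onto a source position by evenly stretching or compressing the source index range, and positions beyond a sentence's real target length are masked out by the caller.

import torch

def uniform_assignment(src_lens, trg_lens):
    max_trg_len = trg_lens.max()
    steps = (src_lens.float() - 1) / (trg_lens.float() - 1)   # per-sentence step size
    index_t = torch.arange(max_trg_len, dtype=torch.float)    # 0 .. max_trg_len - 1
    index_t = steps[:, None] * index_t[None, :]               # batch x max_trg_len
    return torch.round(index_t).long()

src_lens = torch.tensor([4, 7])
trg_lens = torch.tensor([6, 5])
print(uniform_assignment(src_lens, trg_lens))
# within each sentence's true target length the indices walk monotonically from
# source position 0 to src_len - 1; the padded tail is ignored by the caller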
RegularizedBN
RegularizedBN-main/fairseq/models/nat/iterative_nonautoregressive_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq.models import register_model, register_model_architecture from fairseq.models.nat import NATransformerModel def _sequential_poisoning(s, V, beta=0.33, bos=2, eos=3, pad=1): # s: input batch # V: vocabulary size rand_words = torch.randint(low=4, high=V, size=s.size(), device=s.device) choices = torch.rand(size=s.size(), device=s.device) choices.masked_fill_((s == pad) | (s == bos) | (s == eos), 1) replace = choices < beta / 3 repeat = (choices >= beta / 3) & (choices < beta * 2 / 3) swap = (choices >= beta * 2 / 3) & (choices < beta) safe = choices >= beta for i in range(s.size(1) - 1): rand_word = rand_words[:, i] next_word = s[:, i + 1] self_word = s[:, i] replace_i = replace[:, i] swap_i = swap[:, i] & (next_word != 3) repeat_i = repeat[:, i] & (next_word != 3) safe_i = safe[:, i] | ((next_word == 3) & (~replace_i)) s[:, i] = ( self_word * (safe_i | repeat_i).long() + next_word * swap_i.long() + rand_word * replace_i.long() ) s[:, i + 1] = ( next_word * (safe_i | replace_i).long() + self_word * (swap_i | repeat_i).long() ) return s def gumbel_noise(input, TINY=1e-8): return input.new_zeros(*input.size()).uniform_().add_( TINY).log_().neg_().add_(TINY).log_().neg_() @register_model("iterative_nonautoregressive_transformer") class IterNATransformerModel(NATransformerModel): @staticmethod def add_args(parser): NATransformerModel.add_args(parser) parser.add_argument("--train-step", type=int, help="number of refinement iterations during training") parser.add_argument("--dae-ratio", type=float, help="the probability of switching to the denoising auto-encoder loss") parser.add_argument("--stochastic-approx", action="store_true", help="sampling from the decoder as the inputs for next iteration") @classmethod def build_model(cls, args, task): model = super().build_model(args, task) model.train_step = getattr(args, "train_step", 4) model.dae_ratio = getattr(args, "dae_ratio", 0.5) model.stochastic_approx = getattr(args, "stochastic_approx", False) return model def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs ): B, T = prev_output_tokens.size() # encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) # length prediction length_out = self.decoder.forward_length(normalize=False, encoder_out=encoder_out) length_tgt = self.decoder.forward_length_prediction(length_out, encoder_out, tgt_tokens) # decoding word_ins_outs, word_ins_tgts, word_ins_masks = [], [], [] for t in range(self.train_step): word_ins_out = self.decoder( normalize=False, prev_output_tokens=prev_output_tokens, encoder_out=encoder_out, step=t, ) word_ins_tgt = tgt_tokens word_ins_mask = word_ins_tgt.ne(self.pad) word_ins_outs.append(word_ins_out) word_ins_tgts.append(word_ins_tgt) word_ins_masks.append(word_ins_mask) if t < (self.train_step - 1): # prediction for next iteration if self.stochastic_approx: word_ins_prediction = ( word_ins_out + gumbel_noise(word_ins_out) ).max(-1)[1] else: word_ins_prediction = word_ins_out.max(-1)[1] prev_output_tokens = prev_output_tokens.masked_scatter( word_ins_mask, word_ins_prediction[word_ins_mask] ) if self.dae_ratio > 0: # we do not perform denoising for the first iteration corrputed = ( torch.rand(size=(B,), device=prev_output_tokens.device) < self.dae_ratio ) corrputed_tokens = _sequential_poisoning( tgt_tokens[corrputed], 
len(self.tgt_dict), 0.33, self.bos, self.eos, self.pad, ) prev_output_tokens[corrputed] = corrputed_tokens # concat everything word_ins_out = torch.cat(word_ins_outs, 0) word_ins_tgt = torch.cat(word_ins_tgts, 0) word_ins_mask = torch.cat(word_ins_masks, 0) return { "word_ins": { "out": word_ins_out, "tgt": word_ins_tgt, "mask": word_ins_mask, "ls": self.args.label_smoothing, "nll_loss": True }, "length": { "out": length_out, "tgt": length_tgt, "factor": self.decoder.length_loss_factor } } @register_model_architecture( "iterative_nonautoregressive_transformer", "iterative_nonautoregressive_transformer" ) def inat_base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) # --- special arguments --- args.sg_length_pred = getattr(args, "sg_length_pred", False) args.pred_length_offset = getattr(args, "pred_length_offset", False) args.length_loss_factor = getattr(args, "length_loss_factor", 0.1) args.ngram_predictor = getattr(args, "ngram_predictor", 1) args.src_embedding_copy = getattr(args, "src_embedding_copy", False) args.train_step = getattr(args, "train_step", 4) args.dae_ratio = getattr(args, "dae_ratio", 0.5) args.stochastic_approx = getattr(args, "stochastic_approx", False) @register_model_architecture( "iterative_nonautoregressive_transformer", "iterative_nonautoregressive_transformer_wmt_en_de", ) def iter_nat_wmt_en_de(args): inat_base_architecture(args)
8,427
39.912621
99
py
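A toy, self-contained illustration (not repository code) of the Gumbel-max trick behind gumbel_noise above: adding -log(-log(U)) noise to the logits and taking the argmax draws an exact sample from the softmax distribution, which is how --stochastic-approx feeds a sampled rather than greedy prediction into the next refinement iteration.

import torch

torch.manual_seed(0)
logits = torch.tensor([2.0, 1.0, 0.1])
counts = torch.zeros(3)
for _ in range(20000):
    u = torch.rand_like(logits).clamp(min=1e-8)   # guard against log(0), mirroring TINY above
    g = -torch.log(-torch.log(u))                 # Gumbel(0, 1) noise
    counts[(logits + g).argmax()] += 1
print(counts / counts.sum())          # empirical sampling frequencies ...
print(torch.softmax(logits, dim=0))   # ... closely match the softmax probabilities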
RegularizedBN
RegularizedBN-main/fairseq/models/roberta/hub_interface.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.data import encoders class RobertaHubInterface(nn.Module): """A simple PyTorch Hub interface to RoBERTa. Usage: https://github.com/pytorch/fairseq/tree/master/examples/roberta """ def __init__(self, args, task, model): super().__init__() self.args = args self.task = task self.model = model self.bpe = encoders.build_bpe(args) # this is useful for determining the device self.register_buffer('_float_tensor', torch.tensor([0], dtype=torch.float)) @property def device(self): return self._float_tensor.device def encode(self, sentence: str, *addl_sentences, no_separator=False) -> torch.LongTensor: """ BPE-encode a sentence (or multiple sentences). Every sequence begins with a beginning-of-sentence (`<s>`) symbol. Every sentence ends with an end-of-sentence (`</s>`) and we use an extra end-of-sentence (`</s>`) as a separator. Example (single sentence): `<s> a b c </s>` Example (sentence pair): `<s> d e f </s> </s> 1 2 3 </s>` The BPE encoding follows GPT-2. One subtle detail is that the GPT-2 BPE requires leading spaces. For example:: >>> roberta.encode('Hello world').tolist() [0, 31414, 232, 2] >>> roberta.encode(' world').tolist() [0, 232, 2] >>> roberta.encode('world').tolist() [0, 8331, 2] """ bpe_sentence = '<s> ' + self.bpe.encode(sentence) + ' </s>' for s in addl_sentences: bpe_sentence += (' </s>' if not no_separator else '') bpe_sentence += ' ' + self.bpe.encode(s) + ' </s>' tokens = self.task.source_dictionary.encode_line(bpe_sentence, append_eos=False, add_if_not_exist=False) return tokens.long() def decode(self, tokens: torch.LongTensor): assert tokens.dim() == 1 tokens = tokens.numpy() if tokens[0] == self.task.source_dictionary.bos(): tokens = tokens[1:] # remove <s> eos_mask = (tokens == self.task.source_dictionary.eos()) doc_mask = eos_mask[1:] & eos_mask[:-1] sentences = np.split(tokens, doc_mask.nonzero()[0] + 1) sentences = [self.bpe.decode(self.task.source_dictionary.string(s)) for s in sentences] if len(sentences) == 1: return sentences[0] return sentences def extract_features(self, tokens: torch.LongTensor, return_all_hiddens: bool = False) -> torch.Tensor: if tokens.dim() == 1: tokens = tokens.unsqueeze(0) if tokens.size(-1) > self.model.max_positions(): raise ValueError('tokens exceeds maximum length: {} > {}'.format( tokens.size(-1), self.model.max_positions() )) features, extra = self.model( tokens.to(device=self.device), features_only=True, return_all_hiddens=return_all_hiddens, ) if return_all_hiddens: # convert from T x B x C -> B x T x C inner_states = extra['inner_states'] return [inner_state.transpose(0, 1) for inner_state in inner_states] else: return features # just the last layer's features def register_classification_head( self, name: str, num_classes: int = None, embedding_size: int = None, **kwargs ): self.model.register_classification_head( name, num_classes=num_classes, embedding_size=embedding_size, **kwargs ) def predict(self, head: str, tokens: torch.LongTensor, return_logits: bool = False): features = self.extract_features(tokens.to(device=self.device)) logits = self.model.classification_heads[head](features) if return_logits: return logits return F.log_softmax(logits, dim=-1) def extract_features_aligned_to_words(self, sentence: str, return_all_hiddens: 
bool = False) -> torch.Tensor: """Extract RoBERTa features, aligned to spaCy's word-level tokenizer.""" from fairseq.models.roberta import alignment_utils from spacy.tokens import Doc nlp = alignment_utils.spacy_nlp() tokenizer = alignment_utils.spacy_tokenizer() # tokenize both with GPT-2 BPE and spaCy bpe_toks = self.encode(sentence) spacy_toks = tokenizer(sentence) spacy_toks_ws = [t.text_with_ws for t in tokenizer(sentence)] alignment = alignment_utils.align_bpe_to_words(self, bpe_toks, spacy_toks_ws) # extract features and align them features = self.extract_features(bpe_toks, return_all_hiddens=return_all_hiddens) features = features.squeeze(0) aligned_feats = alignment_utils.align_features_to_words(self, features, alignment) # wrap in spaCy Doc doc = Doc( nlp.vocab, words=['<s>'] + [x.text for x in spacy_toks] + ['</s>'], spaces=[True] + [x.endswith(' ') for x in spacy_toks_ws[:-1]] + [True, False], ) assert len(doc) == aligned_feats.size(0) doc.user_token_hooks['vector'] = lambda token: aligned_feats[token.i] return doc def fill_mask(self, masked_input: str, topk: int = 5): masked_token = '<mask>' assert masked_token in masked_input and masked_input.count(masked_token) == 1, \ "Please add one {0} token for the input, eg: 'He is a {0} guy'".format(masked_token) text_spans = masked_input.split(masked_token) text_spans_bpe = (' {0} '.format(masked_token)).join( [self.bpe.encode(text_span.rstrip()) for text_span in text_spans] ).strip() tokens = self.task.source_dictionary.encode_line( '<s> ' + text_spans_bpe + ' </s>', append_eos=False, add_if_not_exist=False, ) masked_index = (tokens == self.task.mask_idx).nonzero() if tokens.dim() == 1: tokens = tokens.unsqueeze(0) with utils.eval(self.model): features, extra = self.model( tokens.long().to(device=self.device), features_only=False, return_all_hiddens=False, ) logits = features[0, masked_index, :].squeeze() prob = logits.softmax(dim=0) values, index = prob.topk(k=topk, dim=0) topk_predicted_token_bpe = self.task.source_dictionary.string(index) topk_filled_outputs = [] for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ')): predicted_token = self.bpe.decode(predicted_token_bpe) # Quick hack to fix https://github.com/pytorch/fairseq/issues/1306 if predicted_token_bpe.startswith('\u2581'): predicted_token = ' ' + predicted_token if " {0}".format(masked_token) in masked_input: topk_filled_outputs.append(( masked_input.replace( ' {0}'.format(masked_token), predicted_token ), values[index].item(), predicted_token, )) else: topk_filled_outputs.append(( masked_input.replace(masked_token, predicted_token), values[index].item(), predicted_token, )) return topk_filled_outputs def disambiguate_pronoun(self, sentence: str) -> bool: """ Usage:: >>> disambiguate_pronoun('The _trophy_ would not fit in the brown suitcase because [it] was too big.') True >>> disambiguate_pronoun('The trophy would not fit in the brown suitcase because [it] was too big.') 'The trophy' """ assert hasattr(self.task, 'disambiguate_pronoun'), \ 'roberta.disambiguate_pronoun() requires a model trained with the WSC task.' with utils.eval(self.model): return self.task.disambiguate_pronoun(self.model, sentence, use_cuda=self.device.type == 'cuda')
8,410
40.029268
114
py
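A hedged usage sketch of the hub interface defined above (the checkpoint name and loading route are illustrative; any RobertaHubInterface obtained, e.g., through torch.hub or RobertaModel.from_pretrained behaves the same way).

import torch

roberta = torch.hub.load('pytorch/fairseq', 'roberta.base')   # returns a RobertaHubInterface
roberta.eval()

tokens = roberta.encode('Hello world!')                    # 1-D LongTensor: <s> ... </s>
assert roberta.decode(tokens) == 'Hello world!'            # BPE round trip

features = roberta.extract_features(tokens)                # B x T x C, last layer only
all_layers = roberta.extract_features(tokens, return_all_hiddens=True)   # list of B x T x C

print(roberta.fill_mask('The capital of France is <mask>.', topk=3))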
RegularizedBN
RegularizedBN-main/fairseq/models/roberta/model.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ RoBERTa: A Robustly Optimized BERT Pretraining Approach. """ import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( LayerNorm, TransformerSentenceEncoder, ) from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_ from .hub_interface import RobertaHubInterface logger = logging.getLogger(__name__) @register_model('roberta') class RobertaModel(FairseqEncoderModel): @classmethod def hub_models(cls): return { 'roberta.base': 'http://dl.fbaipublicfiles.com/fairseq/models/roberta.base.tar.gz', 'roberta.large': 'http://dl.fbaipublicfiles.com/fairseq/models/roberta.large.tar.gz', 'roberta.large.mnli': 'http://dl.fbaipublicfiles.com/fairseq/models/roberta.large.mnli.tar.gz', 'roberta.large.wsc': 'http://dl.fbaipublicfiles.com/fairseq/models/roberta.large.wsc.tar.gz', } def __init__(self, args, encoder): super().__init__(encoder) self.args = args # We follow BERT's random weight initialization self.apply(init_bert_params) self.classification_heads = nn.ModuleDict() @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument('--encoder-layers', type=int, metavar='L', help='num encoder layers') parser.add_argument('--encoder-embed-dim', type=int, metavar='H', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='F', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-attention-heads', type=int, metavar='A', help='num encoder attention heads') parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--pooler-activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use for pooler layer') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', type=float, metavar='D', help='dropout probability after activation in FFN') parser.add_argument('--pooler-dropout', type=float, metavar='D', help='dropout probability in the masked_lm pooler layers') parser.add_argument('--max-positions', type=int, help='number of positional embeddings to learn') parser.add_argument('--load-checkpoint-heads', action='store_true', help='(re-)register and load heads when loading checkpoints') # args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019) parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0, help='LayerDrop probability for encoder') parser.add_argument('--encoder-layers-to-keep', default=None, help='which layers to *keep* when pruning as a comma-separated list') # args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020) parser.add_argument('--quant-noise-pq', type=float, metavar='D', default=0, help='iterative PQ 
quantization noise at training time') parser.add_argument('--quant-noise-pq-block-size', type=int, metavar='D', default=8, help='block size of quantization noise at training time') parser.add_argument('--quant-noise-scalar', type=float, metavar='D', default=0, help='scalar quantization noise and scalar quantization at training time') parser.add_argument('--untie-weights-roberta', action='store_true', help='Untie weights between embeddings and classifiers in RoBERTa') @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present base_architecture(args) if not hasattr(args, 'max_positions'): args.max_positions = args.tokens_per_sample encoder = RobertaEncoder(args, task.source_dictionary) return cls(args, encoder) def forward(self, src_tokens, features_only=False, return_all_hiddens=False, classification_head_name=None, **kwargs): if classification_head_name is not None: features_only = True x, extra = self.encoder(src_tokens, features_only, return_all_hiddens, **kwargs) if classification_head_name is not None: x = self.classification_heads[classification_head_name](x) return x, extra def get_normalized_probs(self, net_output, log_probs, sample=None): """Get normalized probabilities (or log probs) from a net's output.""" logits = net_output[0].float() if log_probs: return F.log_softmax(logits, dim=-1) else: return F.softmax(logits, dim=-1) def register_classification_head(self, name, num_classes=None, inner_dim=None, **kwargs): """Register a classification head.""" if name in self.classification_heads: prev_num_classes = self.classification_heads[name].out_proj.out_features prev_inner_dim = self.classification_heads[name].dense.out_features if num_classes != prev_num_classes or inner_dim != prev_inner_dim: logger.warning( 're-registering head "{}" with num_classes {} (prev: {}) ' 'and inner_dim {} (prev: {})'.format( name, num_classes, prev_num_classes, inner_dim, prev_inner_dim ) ) self.classification_heads[name] = RobertaClassificationHead( self.args.encoder_embed_dim, inner_dim or self.args.encoder_embed_dim, num_classes, self.args.pooler_activation_fn, self.args.pooler_dropout, self.args.quant_noise_pq, self.args.quant_noise_pq_block_size, ) @property def supported_targets(self): return {'self'} @classmethod def from_pretrained(cls, model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', bpe='gpt2', **kwargs): from fairseq import hub_utils print(model_name_or_path) x = hub_utils.from_pretrained( model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), bpe=bpe, load_checkpoint_heads=True, **kwargs, ) #print("here1") return RobertaHubInterface(x['args'], x['task'], x['models'][0]) def upgrade_state_dict_named(self, state_dict, name): prefix = name + '.' if name != '' else '' # rename decoder -> encoder before upgrading children modules for k in list(state_dict.keys()): if k.startswith(prefix + 'decoder'): new_k = prefix + 'encoder' + k[len(prefix + 'decoder'):] state_dict[new_k] = state_dict[k] del state_dict[k] # upgrade children modules super().upgrade_state_dict_named(state_dict, name) # Handle new classification heads present in the state dict. current_head_names = ( [] if not hasattr(self, 'classification_heads') else self.classification_heads.keys() ) keys_to_delete = [] for k in state_dict.keys(): if not k.startswith(prefix + 'classification_heads.'): continue head_name = k[len(prefix + 'classification_heads.'):].split('.')[0] num_classes = state_dict[prefix + 'classification_heads.' 
+ head_name + '.out_proj.weight'].size(0) inner_dim = state_dict[prefix + 'classification_heads.' + head_name + '.dense.weight'].size(0) if getattr(self.args, 'load_checkpoint_heads', False): if head_name not in current_head_names: self.register_classification_head(head_name, num_classes, inner_dim) else: if head_name not in current_head_names: logger.warning( 'deleting classification head ({}) from checkpoint ' 'not present in current model: {}'.format(head_name, k) ) keys_to_delete.append(k) elif ( num_classes != self.classification_heads[head_name].out_proj.out_features or inner_dim != self.classification_heads[head_name].dense.out_features ): logger.warning( 'deleting classification head ({}) from checkpoint ' 'with different dimensions than current model: {}'.format(head_name, k) ) keys_to_delete.append(k) for k in keys_to_delete: del state_dict[k] # Copy any newly-added classification heads into the state dict # with their current weights. if hasattr(self, 'classification_heads'): cur_state = self.classification_heads.state_dict() for k, v in cur_state.items(): if prefix + 'classification_heads.' + k not in state_dict: logger.info('Overwriting ' + prefix + 'classification_heads.' + k) state_dict[prefix + 'classification_heads.' + k] = v class RobertaLMHead(nn.Module): """Head for masked language modeling.""" def __init__(self, embed_dim, output_dim, activation_fn, weight=None): super().__init__() self.dense = nn.Linear(embed_dim, embed_dim) self.activation_fn = utils.get_activation_fn(activation_fn) self.layer_norm = LayerNorm(embed_dim) if weight is None: weight = nn.Linear(embed_dim, output_dim, bias=False).weight self.weight = weight self.bias = nn.Parameter(torch.zeros(output_dim)) def forward(self, features, masked_tokens=None, **kwargs): # Only project the masked tokens while training, # saves both memory and computation if masked_tokens is not None: features = features[masked_tokens, :] x = self.dense(features) x = self.activation_fn(x) x = self.layer_norm(x) # project back to size of vocabulary with bias x = F.linear(x, self.weight) + self.bias return x class RobertaClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, input_dim, inner_dim, num_classes, activation_fn, pooler_dropout, q_noise=0, qn_block_size=8): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.activation_fn = utils.get_activation_fn(activation_fn) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = apply_quant_noise_( nn.Linear(inner_dim, num_classes), q_noise, qn_block_size ) def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. 
to [CLS]) x = self.dropout(x) x = self.dense(x) x = self.activation_fn(x) x = self.dropout(x) x = self.out_proj(x) return x class RobertaEncoder(FairseqEncoder): """RoBERTa encoder.""" def __init__(self, args, dictionary): super().__init__(dictionary) self.args = args if args.encoder_layers_to_keep: args.encoder_layers = len(args.encoder_layers_to_keep.split(",")) self.sentence_encoder = TransformerSentenceEncoder( padding_idx=dictionary.pad(), vocab_size=len(dictionary), num_encoder_layers=args.encoder_layers, embedding_dim=args.encoder_embed_dim, ffn_embedding_dim=args.encoder_ffn_embed_dim, num_attention_heads=args.encoder_attention_heads, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, layerdrop=args.encoder_layerdrop, max_seq_len=args.max_positions, num_segments=0, encoder_normalize_before=True, apply_bert_init=True, activation_fn=args.activation_fn, q_noise=args.quant_noise_pq, qn_block_size=args.quant_noise_pq_block_size, ) args.untie_weights_roberta = getattr(args, 'untie_weights_roberta', False) self.lm_head = RobertaLMHead( embed_dim=args.encoder_embed_dim, output_dim=len(dictionary), activation_fn=args.activation_fn, weight=self.sentence_encoder.embed_tokens.weight if not args.untie_weights_roberta else None, ) def forward(self, src_tokens, features_only=False, return_all_hiddens=False, masked_tokens=None, **unused): """ Args: src_tokens (LongTensor): input tokens of shape `(batch, src_len)` features_only (bool, optional): skip LM head and just return features. If True, the output will be of shape `(batch, src_len, embed_dim)`. return_all_hiddens (bool, optional): also return all of the intermediate hidden states (default: False). Returns: tuple: - the LM output of shape `(batch, src_len, vocab)` - a dictionary of additional data, where 'inner_states' is a list of hidden states. Note that the hidden states have shape `(src_len, batch, vocab)`. 
""" x, extra = self.extract_features(src_tokens, return_all_hiddens=return_all_hiddens) if not features_only: x = self.output_layer(x, masked_tokens=masked_tokens) return x, extra def extract_features(self, src_tokens, return_all_hiddens=False, **unused): inner_states, _ = self.sentence_encoder( src_tokens, last_state_only=not return_all_hiddens, ) features = inner_states[-1].transpose(0, 1) # T x B x C -> B x T x C return features, {'inner_states': inner_states if return_all_hiddens else None} def output_layer(self, features, masked_tokens=None, **unused): return self.lm_head(features, masked_tokens) def max_positions(self): """Maximum output length supported by the encoder.""" return self.args.max_positions @register_model_architecture('roberta', 'roberta') def base_architecture(args): args.encoder_layers = getattr(args, 'encoder_layers', 12) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 3072) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 12) args.activation_fn = getattr(args, 'activation_fn', 'gelu') args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh') args.dropout = getattr(args, 'dropout', 0.1) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.activation_dropout = getattr(args, 'activation_dropout', 0.0) args.pooler_dropout = getattr(args, 'pooler_dropout', 0.0) args.encoder_layers_to_keep = getattr(args, 'encoder_layers_to_keep', None) args.encoder_layerdrop = getattr(args, 'encoder_layerdrop', 0.0) @register_model_architecture('roberta', 'roberta_base') def roberta_base_architecture(args): base_architecture(args) @register_model_architecture('roberta', 'roberta_large') def roberta_large_architecture(args): args.encoder_layers = getattr(args, 'encoder_layers', 24) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) base_architecture(args) @register_model_architecture('roberta', 'xlm') def xlm_architecture(args): args.encoder_layers = getattr(args, 'encoder_layers', 16) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1280) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1280*4) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) base_architecture(args)
17,539
42.9599
122
py
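A brief sketch (checkpoint paths and the head name are hypothetical placeholders) of the classification-head machinery above: a head registered under a name is selected at forward time via classification_head_name, and its logits are returned in place of the language-model output.

from fairseq.models.roberta import RobertaModel

# 'checkpoints' / 'model.pt' stand in for a trained RoBERTa checkpoint on disk
roberta = RobertaModel.from_pretrained('checkpoints', checkpoint_file='model.pt')
model = roberta.model

# attach a randomly initialised two-class head (it would normally be fine-tuned)
model.register_classification_head('sentence_cls', num_classes=2)

tokens = roberta.encode('An example sentence.').unsqueeze(0)   # 1 x T batch
logits, _ = model(tokens, classification_head_name='sentence_cls')
print(logits.shape)   # torch.Size([1, 2])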
RegularizedBN
RegularizedBN-main/fairseq/models/roberta/alignment_utils.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from collections import Counter
from typing import List

import torch


def align_bpe_to_words(roberta, bpe_tokens: torch.LongTensor, other_tokens: List[str]):
    """
    Helper to align GPT-2 BPE to other tokenization formats (e.g., spaCy).

    Args:
        roberta (RobertaHubInterface): RoBERTa instance
        bpe_tokens (torch.LongTensor): GPT-2 BPE tokens of shape `(T_bpe)`
        other_tokens (List[str]): other tokens of shape `(T_words)`

    Returns:
        List[str]: mapping from *other_tokens* to corresponding *bpe_tokens*.
    """
    assert bpe_tokens.dim() == 1
    assert bpe_tokens[0] == 0

    def clean(text):
        return text.strip()

    # remove whitespaces to simplify alignment
    bpe_tokens = [roberta.task.source_dictionary.string([x]) for x in bpe_tokens]
    bpe_tokens = [clean(roberta.bpe.decode(x) if x not in {'<s>', ''} else x) for x in bpe_tokens]
    other_tokens = [clean(str(o)) for o in other_tokens]

    # strip leading <s>
    bpe_tokens = bpe_tokens[1:]
    assert ''.join(bpe_tokens) == ''.join(other_tokens)

    # create alignment from every word to a list of BPE tokens
    alignment = []
    bpe_toks = filter(lambda item: item[1] != '', enumerate(bpe_tokens, start=1))
    j, bpe_tok = next(bpe_toks)
    for other_tok in other_tokens:
        bpe_indices = []
        while True:
            if other_tok.startswith(bpe_tok):
                bpe_indices.append(j)
                other_tok = other_tok[len(bpe_tok):]
                try:
                    j, bpe_tok = next(bpe_toks)
                except StopIteration:
                    j, bpe_tok = None, None
            elif bpe_tok.startswith(other_tok):
                # other_tok spans multiple BPE tokens
                bpe_indices.append(j)
                bpe_tok = bpe_tok[len(other_tok):]
                other_tok = ''
            else:
                raise Exception('Cannot align "{}" and "{}"'.format(other_tok, bpe_tok))
            if other_tok == '':
                break
        assert len(bpe_indices) > 0
        alignment.append(bpe_indices)
    assert len(alignment) == len(other_tokens)

    return alignment


def align_features_to_words(roberta, features, alignment):
    """
    Align given features to words.

    Args:
        roberta (RobertaHubInterface): RoBERTa instance
        features (torch.Tensor): features to align of shape `(T_bpe x C)`
        alignment: alignment between BPE tokens and words returned by
            func:`align_bpe_to_words`.
    """
    assert features.dim() == 2

    bpe_counts = Counter(j for bpe_indices in alignment for j in bpe_indices)
    assert bpe_counts[0] == 0  # <s> shouldn't be aligned
    denom = features.new([bpe_counts.get(j, 1) for j in range(len(features))])
    weighted_features = features / denom.unsqueeze(-1)

    output = [weighted_features[0]]
    largest_j = -1
    for bpe_indices in alignment:
        output.append(weighted_features[bpe_indices].sum(dim=0))
        largest_j = max(largest_j, *bpe_indices)
    for j in range(largest_j + 1, len(features)):
        output.append(weighted_features[j])
    output = torch.stack(output)
    assert torch.all(torch.abs(output.sum(dim=0) - features.sum(dim=0)) < 1e-4)
    return output


def spacy_nlp():
    if getattr(spacy_nlp, '_nlp', None) is None:
        try:
            from spacy.lang.en import English
            spacy_nlp._nlp = English()
        except ImportError:
            raise ImportError('Please install spacy with: pip install spacy')
    return spacy_nlp._nlp


def spacy_tokenizer():
    if getattr(spacy_tokenizer, '_tokenizer', None) is None:
        try:
            nlp = spacy_nlp()
            spacy_tokenizer._tokenizer = nlp.Defaults.create_tokenizer(nlp)
        except ImportError:
            raise ImportError('Please install spacy with: pip install spacy')
    return spacy_tokenizer._tokenizer
4,074
34.12931
98
py
RegularizedBN
RegularizedBN-main/fairseq/models/huggingface/hf_gpt2.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import sys from typing import Dict, List, Optional import torch from fairseq.models import ( FairseqIncrementalDecoder, FairseqLanguageModel, register_model, register_model_architecture, ) try: # Prepend the transformers submodule to the path, so that # it's prioritized over other installations. This allows # making local changes in the submodule. sys.path.insert( 0, os.path.join(os.path.dirname(__file__), 'transformers', 'src') ) from transformers import AutoModel, GPT2Config, GPT2LMHeadModel has_hf = True except ImportError: has_hf = False logger = logging.getLogger(__name__) DEFAULT_MAX_TARGET_POSITIONS = 1024 @register_model('hf_gpt2') class HuggingFaceGPT2LanguageModel(FairseqLanguageModel): def __init__(self, decoder): super().__init__(decoder) if not has_hf: raise ImportError( '\n\nPlease install huggingface/transformers with:' '\n\n pip install transformers' '\n\nOr to make local edits, install the submodule:' '\n\n git submodule update --init ' 'fairseq/models/huggingface/transformers' ) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--embed-dim', type=int, metavar='N', help='embedding dimension') parser.add_argument('--num-attention-heads', type=int, metavar='N', help='num attention heads') parser.add_argument('--num-layers', type=int, metavar='N', help='num layers') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability for all fully connected layers ' 'in the embeddings, encoder, and pooler') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" default_architecture(args) return cls(HuggingFaceGPT2Decoder(args, task)) class HuggingFaceGPT2Decoder(FairseqIncrementalDecoder): def __init__(self, args, task): super().__init__(task.target_dictionary) try: # Prepend the transformers submodule to the path, so that # it's prioritized over other installations. This allows # making local changes in the submodule. 
sys.path.insert( 0, os.path.join(os.path.dirname(__file__), 'transformers', 'src') ) from transformers import GPT2Config, GPT2LMHeadModel except ImportError: raise ImportError( '\n\nPlease install huggingface/transformers with:' '\n\n pip install transformers' '\n\nOr to make local edits, install the submodule:' '\n\n git submodule update --init ' 'fairseq/models/huggingface/transformers' ) config = GPT2Config( vocab_size=len(task.target_dictionary), n_positions=args.max_target_positions + 1, n_ctx=args.max_target_positions, n_embd=args.embed_dim, n_layer=args.num_layers, n_head=args.num_attention_heads, resid_pdrop=args.dropout, embd_pdrop=args.dropout, attn_pdrop=args.attention_dropout, layer_norm_epsilon=1e-6, ) self.model = GPT2LMHeadModel(config) # set zero embedding for padding symbol self.pad_idx = task.target_dictionary.pad() self.model.transformer.wte.weight.data[self.pad_idx].zero_() self.model.transformer.wpe.weight.data[0].zero_() def forward( self, prev_output_tokens, src_lengths=None, incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None, encoder_out=None, ): features = self.extract_features(prev_output_tokens, incremental_state) lm_logits = self.model.lm_head(features) return (lm_logits, ) def extract_features( self, prev_output_tokens, incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None, ): if incremental_state: past = self.get_incremental_state("past") else: past = None # don't attend to padding symbols attention_mask = prev_output_tokens.ne(self.pad_idx).int() # set position ids to exclude padding symbols position_ids = attention_mask * ( torch.arange(1, 1 + prev_output_tokens.size(1)) .to(prev_output_tokens) .repeat(prev_output_tokens.size(0), 1) ) outputs = self.model.transformer( input_ids=prev_output_tokens, past=past, attention_mask=attention_mask, position_ids=position_ids, ) last_hidden_states = outputs[0] if incremental_state: self.set_incremental_state(incremental_state, "past", outputs[1]) return last_hidden_states def max_positions(self): return self.model.config.n_positions - 1 @register_model_architecture('hf_gpt2', 'hf_gpt2') def default_architecture(args): if getattr(args, 'max_target_positions', None) is None: args.max_target_positions = getattr( args, 'tokens_per_sample', DEFAULT_MAX_TARGET_POSITIONS ) args.embed_dim = getattr(args, 'embed_dim', 768) args.num_attention_heads = getattr(args, 'num_attention_heads', 12) args.num_layers = getattr(args, 'num_layers', 12) args.dropout = getattr(args, 'dropout', 0.1) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) @register_model_architecture('hf_gpt2', 'hf_gpt2_medium') def hf_gpt2_medium(args): args.embed_dim = getattr(args, 'embed_dim', 1024) args.num_attention_heads = getattr(args, 'num_attention_heads', 16) args.num_layers = getattr(args, 'num_layers', 24) default_architecture(args) @register_model_architecture('hf_gpt2', 'hf_gpt2_large') def hf_gpt2_large(args): args.embed_dim = getattr(args, 'embed_dim', 1280) args.num_attention_heads = getattr(args, 'num_attention_heads', 20) args.num_layers = getattr(args, 'num_layers', 36) default_architecture(args) @register_model_architecture('hf_gpt2', 'hf_gpt2_xl') def hf_gpt2_xl(args): args.embed_dim = getattr(args, 'embed_dim', 1600) args.num_attention_heads = getattr(args, 'num_attention_heads', 25) args.num_layers = getattr(args, 'num_layers', 48) default_architecture(args)
7,034
34.175
86
py
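A toy illustration (plain tensors; the pad index 1 matches fairseq's default dictionary) of how HuggingFaceGPT2Decoder.extract_features above builds an attention mask and position ids that skip padding symbols before calling the GPT-2 transformer.

import torch

pad_idx = 1
prev_output_tokens = torch.tensor([[5, 6, 7, 1, 1],
                                   [5, 6, 1, 1, 1]])

attention_mask = prev_output_tokens.ne(pad_idx).int()
position_ids = attention_mask * (
    torch.arange(1, 1 + prev_output_tokens.size(1)).repeat(prev_output_tokens.size(0), 1)
)

print(attention_mask)
# tensor([[1, 1, 1, 0, 0],
#         [1, 1, 0, 0, 0]], dtype=torch.int32)
print(position_ids)
# tensor([[1, 2, 3, 0, 0],
#         [1, 2, 0, 0, 0]])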
RegularizedBN
RegularizedBN-main/fairseq/model_parallel/modules/transformer_sentence_encoder_layer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.modules import (
    TransformerSentenceEncoderLayer
)
from fairseq.model_parallel.modules import ModelParallelMultiheadAttention

try:
    from fairseq.model_parallel.megatron.mpu import (
        ColumnParallelLinear,
        RowParallelLinear,
    )
    has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
    has_megatron_submodule = False


class ModelParallelTransformerSentenceEncoderLayer(TransformerSentenceEncoderLayer):
    """
    Implements a Model Parallel Transformer Encoder Layer used in
    BERT/XLM style pre-trained models.
    """

    def build_fc1(self, input_dim, output_dim, **unused):
        return ColumnParallelLinear(input_dim, output_dim, gather_output=False)

    def build_fc2(self, input_dim, output_dim, **unused):
        return RowParallelLinear(input_dim, output_dim, input_is_parallel=True)

    def build_self_attention(
        self,
        embed_dim,
        num_attention_heads,
        dropout,
        **kwargs,
    ):
        return ModelParallelMultiheadAttention(
            embed_dim,
            num_attention_heads,
            dropout=dropout,
            self_attention=True
        )

    def forward(
        self,
        x: torch.Tensor,
        self_attn_mask: torch.Tensor = None,
        self_attn_padding_mask: torch.Tensor = None,
    ):
        """
        LayerNorm is applied either before or after the self-attention/ffn
        modules similar to the original Transformer implementation.
        """
        residual = x
        x = self.self_attn_layer_norm(x)
        x, attn = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=self_attn_padding_mask,
            need_weights=False,
            attn_mask=self_attn_mask,
        )
        x = self.dropout_module(x)
        x = residual + x

        residual = x
        x = self.final_layer_norm(x)
        x = self.activation_fn(self.fc1(x))
        x = self.activation_dropout_module(x)
        x = self.fc2(x)
        x = self.dropout_module(x)
        x = residual + x
        return x, None
2,356
28.4625
84
py
RegularizedBN
RegularizedBN-main/fairseq/model_parallel/modules/multihead_attention.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, Optional, Tuple import torch import torch.nn.functional as F from fairseq import utils from torch import Tensor, nn from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.modules.fairseq_dropout import FairseqDropout try: from fairseq.model_parallel.megatron.mpu import ( get_cuda_rng_tracker, get_model_parallel_world_size, ColumnParallelLinear, RowParallelLinear, ) has_megatron_submodule = True except (ImportError, ModuleNotFoundError): has_megatron_submodule = False @with_incremental_state class ModelParallelMultiheadAttention(nn.Module): """Model parallel Multi-headed attention. This performs the Multi-headed attention over multiple gpus. See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details. """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, self_attention=False, encoder_decoder_attention=False, ): super().__init__() if not has_megatron_submodule: raise ImportError( '\n\nPlease install the megatron submodule:' '\n\n git submodule update --init ' 'fairseq/model_parallel/megatron' ) self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.model_parallel_size = get_model_parallel_world_size() self.num_heads_partition = num_heads // self.model_parallel_size assert ( self.num_heads_partition * self.model_parallel_size == num_heads ), "Number of heads must be divisble by model parallel size" self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.head_dim = embed_dim // num_heads assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert not self.self_attention or self.qkv_same_dim, ( "Self-attention requires query, key and value to be of the same size" ) self.k_proj = ColumnParallelLinear(self.kdim, embed_dim, bias=bias, gather_output=False) self.v_proj = ColumnParallelLinear(self.vdim, embed_dim, bias=bias, gather_output=False) self.q_proj = ColumnParallelLinear(embed_dim, embed_dim, bias=bias, gather_output=False) self.out_proj = RowParallelLinear(embed_dim, embed_dim, bias=bias, input_is_parallel=True) def forward( self, query, key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, static_kv: bool = False, attn_mask: Optional[Tensor] = None, **unused_kwargs, ) -> Tuple[Tensor, Optional[Tensor]]: """Input shape: Time x Batch x Channel Args: key_padding_mask (ByteTensor, optional): mask to exclude keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s. attn_mask (ByteTensor, optional): typically used to implement causal attention, where the mask prevents the attention from looking forward in time (default: None). 
""" tgt_len, bsz, embed_dim = query.size() assert embed_dim == self.embed_dim assert list(query.size()) == [tgt_len, bsz, embed_dim] if incremental_state is not None: saved_state = self._get_input_buffer(incremental_state) if saved_state is not None and "prev_key" in saved_state: # previous time steps are cached - no need to recompute # key and value if they are static if static_kv: assert self.encoder_decoder_attention and not self.self_attention key = value = None else: saved_state = None if self.self_attention: q = self.q_proj(query) k = self.k_proj(query) v = self.v_proj(query) elif self.encoder_decoder_attention: # encoder-decoder attention q = self.q_proj(query) if key is None: assert value is None k = v = None else: k = self.k_proj(key) v = self.v_proj(key) else: assert key is not None and value is not None q = self.q_proj(query) k = self.k_proj(key) v = self.v_proj(value) q *= self.scaling q = ( q.contiguous() .view(tgt_len, bsz * self.num_heads_partition, self.head_dim) .transpose(0, 1) ) if k is not None: k = ( k.contiguous() .view(-1, bsz * self.num_heads_partition, self.head_dim) .transpose(0, 1) ) if v is not None: v = ( v.contiguous() .view(-1, bsz * self.num_heads_partition, self.head_dim) .transpose(0, 1) ) if saved_state is not None: # saved states are stored with shape (bsz, num_heads_partition, seq_len, head_dim) if "prev_key" in saved_state: _prev_key = saved_state["prev_key"] assert _prev_key is not None prev_key = _prev_key.view(bsz * self.num_heads_partition, -1, self.head_dim) if static_kv: k = prev_key else: assert k is not None k = torch.cat([prev_key, k], dim=1) if "prev_value" in saved_state: _prev_value = saved_state["prev_value"] assert _prev_value is not None prev_value = _prev_value.view(bsz * self.num_heads_partition, -1, self.head_dim) if static_kv: v = prev_value else: assert v is not None v = torch.cat([prev_value, v], dim=1) prev_key_padding_mask: Optional[Tensor] = None if "prev_key_padding_mask" in saved_state: prev_key_padding_mask = saved_state["prev_key_padding_mask"] assert k is not None and v is not None key_padding_mask = ModelParallelMultiheadAttention._append_prev_key_padding_mask( key_padding_mask=key_padding_mask, prev_key_padding_mask=prev_key_padding_mask, batch_size=bsz, src_len=k.size(1), static_kv=static_kv, ) saved_state["prev_key"] = k.view(bsz, self.num_heads_partition, -1, self.head_dim) saved_state["prev_value"] = v.view(bsz, self.num_heads_partition, -1, self.head_dim) saved_state["prev_key_padding_mask"] = key_padding_mask # In this branch incremental_state is never None assert incremental_state is not None incremental_state = self._set_input_buffer(incremental_state, saved_state) assert k is not None src_len = k.size(1) # This is part of a workaround to get around fork/join parallelism # not supporting Optional types. 
if key_padding_mask is not None and key_padding_mask.dim() == 0: key_padding_mask = None if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len attn_weights = torch.bmm(q, k.transpose(1, 2)) assert list(attn_weights.size()) == [bsz * self.num_heads_partition, tgt_len, src_len] if attn_mask is not None: attn_mask = attn_mask.unsqueeze(0) attn_weights += attn_mask if key_padding_mask is not None: # don't attend to padding symbols attn_weights = attn_weights.view(bsz, self.num_heads_partition, tgt_len, src_len) attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf") ) attn_weights = attn_weights.view(bsz * self.num_heads_partition, tgt_len, src_len) attn_weights_float = utils.softmax( attn_weights, dim=-1 ) attn_weights = attn_weights_float.type_as(attn_weights) with get_cuda_rng_tracker().fork(): attn_probs = self.dropout_module(attn_weights) assert v is not None attn = torch.bmm(attn_probs, v) assert list(attn.size()) == [bsz * self.num_heads_partition, tgt_len, self.head_dim] embed_dim_partition = embed_dim // self.model_parallel_size attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim_partition) attn = self.out_proj(attn) # return attn_weights None to keep the return type same as single gpu multihead attention # This will be deprecated. attn_weights: Optional[Tensor] = None return attn, attn_weights @staticmethod def _append_prev_key_padding_mask( key_padding_mask: Optional[Tensor], prev_key_padding_mask: Optional[Tensor], batch_size: int, src_len: int, static_kv: bool, ) -> Optional[Tensor]: # saved key padding masks have shape (bsz, seq_len) if prev_key_padding_mask is not None and static_kv: new_key_padding_mask = prev_key_padding_mask elif prev_key_padding_mask is not None and key_padding_mask is not None: new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1 ) # During incremental decoding, as the padding token enters and # leaves the frame, there will be a time when prev or current # is None elif prev_key_padding_mask is not None: filler = torch.zeros(batch_size, src_len - prev_key_padding_mask.size(1)) if prev_key_padding_mask.is_cuda: filler = filler.cuda() new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), filler.float()], dim=1 ) elif key_padding_mask is not None: filler = torch.zeros(batch_size, src_len - key_padding_mask.size(1)) if key_padding_mask.is_cuda: filler = filler.cuda() new_key_padding_mask = torch.cat( [filler.float(), key_padding_mask.float()], dim=1 ) else: new_key_padding_mask = prev_key_padding_mask return new_key_padding_mask def reorder_incremental_state( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order ): """Reorder buffered internal state (for incremental generation).""" input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: for k in input_buffer.keys(): if input_buffer[k] is not None: input_buffer[k] = input_buffer[k].index_select(0, new_order) incremental_state = self._set_input_buffer(incremental_state, input_buffer) return incremental_state def _get_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] ) -> Dict[str, Optional[Tensor]]: result = self.get_incremental_state(incremental_state, "attn_state") if result is not None: return result else: empty_result: Dict[str, Optional[Tensor]] = {} return empty_result def _set_input_buffer( self, incremental_state: Dict[str, 
Dict[str, Optional[Tensor]]], buffer: Dict[str, Optional[Tensor]], ): return self.set_incremental_state(incremental_state, "attn_state", buffer)
12,558
39.124601
98
py
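A quick arithmetic sketch of how the attention above is partitioned: each model-parallel rank keeps num_heads / world_size heads, so its Q/K/V activations cover only a slice of the embedding dimension, and the row-parallel output projection sums the slices back together. Plain Python with no Megatron dependency; the sizes below are example values rather than any particular configuration.

embed_dim = 1024           # example sizes, chosen only for illustration
num_heads = 16
model_parallel_size = 4    # number of GPUs the attention is sharded over

assert num_heads % model_parallel_size == 0, \
    "Number of heads must be divisible by model parallel size"

num_heads_partition = num_heads // model_parallel_size  # 4 heads live on each rank
head_dim = embed_dim // num_heads                       # 64, unchanged by sharding
embed_dim_partition = embed_dim // model_parallel_size  # 256 output columns per rank

# Shapes seen inside forward() on a single rank, for bsz=2 and tgt_len=10:
#   q / k / v: (bsz * num_heads_partition, tgt_len, head_dim) -> (8, 10, 64)
#   attn     : (tgt_len, bsz, embed_dim_partition)            -> (10, 2, 256)
print(num_heads_partition, head_dim, embed_dim_partition)   # 4 64 256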
RegularizedBN
RegularizedBN-main/fairseq/model_parallel/modules/transformer_sentence_encoder.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from fairseq.modules import ( LayerNorm, MultiheadAttention, PositionalEmbedding, TransformerSentenceEncoder, ) from fairseq.model_parallel.modules import ( ModelParallelTransformerSentenceEncoderLayer, ) try: from fairseq.model_parallel.megatron.mpu import ( copy_to_model_parallel_region, gather_from_model_parallel_region, VocabParallelEmbedding, ) has_megatron_submodule = True except (ImportError, ModuleNotFoundError): has_megatron_submodule = False import random class ModelParallelTransformerSentenceEncoder(TransformerSentenceEncoder): """ Implementation for a Model Parallel Bi-directional Transformer based Sentence Encoder used in BERT/XLM style pre-trained models. """ def build_embedding(self, vocab_size, embedding_dim, padding_idx): return VocabParallelEmbedding(vocab_size, embedding_dim, padding_idx) def build_transformer_sentence_encoder_layer( self, embedding_dim, ffn_embedding_dim, num_attention_heads, dropout, attention_dropout, activation_dropout, activation_fn, export, **unused, ): return ModelParallelTransformerSentenceEncoderLayer( embedding_dim=embedding_dim, ffn_embedding_dim=ffn_embedding_dim, num_attention_heads=num_attention_heads, dropout=dropout, attention_dropout=attention_dropout, activation_dropout=activation_dropout, activation_fn=activation_fn, export=export, )
1,884
28
77
py
RegularizedBN
RegularizedBN-main/fairseq/model_parallel/models/transformer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch.nn as nn import torch.nn.functional as F from fairseq.models import ( register_model, ) from fairseq.models.transformer import ( TransformerDecoder, TransformerEncoder, TransformerModel, ) from fairseq.model_parallel.modules import ( ModelParallelTransformerDecoderLayer, ModelParallelTransformerEncoderLayer, ) try: from fairseq.model_parallel.megatron.mpu import ( copy_to_model_parallel_region, gather_from_model_parallel_region, VocabParallelEmbedding, ) has_megatron_submodule = True except (ImportError, ModuleNotFoundError): has_megatron_submodule = False logger = logging.getLogger(__name__) @register_model('model_parallel_transformer') class ModelParallelTransformerModel(TransformerModel): """ Model parallel Transformer model. """ @classmethod def build_embedding(cls, args, dictionary, embed_dim, path=None): if not has_megatron_submodule: raise ImportError( '\n\nPlease install the megatron submodule:' '\n\n git submodule update --init ' 'fairseq/model_parallel/megatron' ) num_embeddings = len(dictionary) padding_idx = dictionary.pad() def _vocab_init(tensor, **kwargs): nn.init.normal_(tensor, mean=0, std=num_embeddings ** -0.5) nn.init.constant_(tensor[1], 0) emb = VocabParallelEmbedding(num_embeddings, embed_dim, padding_idx, init_method=_vocab_init) # if provided, load from preloaded dictionaries if path: raise NotImplementedError("Loading of embedding from path is not supported for model parallel") return emb @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return ModelParallelTransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return ModelParallelTransformerDecoder( args, tgt_dict, embed_tokens, no_encoder_attn=getattr(args, 'no_cross_attention', False), ) class ModelParallelTransformerEncoder(TransformerEncoder): """ Model parallel Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`ModelParallelTransformerEncoderLayer`. """ def build_encoder_layer(self, args): return ModelParallelTransformerEncoderLayer(args) class ModelParallelTransformerDecoder(TransformerDecoder): """ Model Parallel Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`ModelParallelTransformerDecoderLayer`. """ def build_decoder_layer(self, args, no_encoder_attn=False): return ModelParallelTransformerDecoderLayer(args, no_encoder_attn) def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if not self.share_input_output_embed: raise NotImplementedError( 'Model parallel training currently requires --share-decoder-input-output-embed' ) features = copy_to_model_parallel_region(features) # project back to size of vocabulary x = self.output_projection(features) if getattr(self.args, 'criterion') != 'vocab_parallel_cross_entropy': x = gather_from_model_parallel_region(x).contiguous() return x
3,642
31.238938
107
py
RegularizedBN
RegularizedBN-main/fairseq/model_parallel/models/transformer_lm.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer_lm import ( base_lm_architecture, TransformerLanguageModel, ) from fairseq.model_parallel.models.transformer import ModelParallelTransformerDecoder try: from fairseq.model_parallel.megatron.mpu import VocabParallelEmbedding has_megatron_submodule = True except (ImportError, ModuleNotFoundError): has_megatron_submodule = False DEFAULT_MAX_TARGET_POSITIONS = 1024 @register_model('model_parallel_transformer_lm') class ModelParallelTransformerLanguageModel(TransformerLanguageModel): @classmethod def build_model(cls, args, task): """Build a new model instance.""" if not has_megatron_submodule: raise ImportError( '\n\nPlease install the megatron submodule:' '\n\n git submodule update --init ' 'fairseq/model_parallel/megatron' ) # make sure all arguments are present in older models base_lm_architecture(args) if args.decoder_layers_to_keep: args.decoder_layers = len(args.decoder_layers_to_keep.split(",")) if getattr(args, 'max_target_positions', None) is None: args.max_target_positions = getattr(args, 'tokens_per_sample', DEFAULT_MAX_TARGET_POSITIONS) if args.character_embeddings: raise NotImplementedError("Character embeddings is not supported for model parallel") elif args.adaptive_input: raise NotImplementedError("Adaptive input is not supported for model parallel") else: embed_tokens = cls.build_embedding(args, task.source_dictionary, args.decoder_input_dim) decoder = ModelParallelTransformerDecoder( args, task.target_dictionary, embed_tokens, no_encoder_attn=True, ) return cls(decoder) @classmethod def build_embedding(cls, args, dictionary, embed_dim, path=None): def _vocab_init(tensor, **kwargs): nn.init.normal_(tensor, mean=0, std=embed_dim ** -0.5) nn.init.constant_(tensor[1], 0) embed_tokens = VocabParallelEmbedding(len(dictionary), embed_dim, dictionary.pad(), init_method=_vocab_init) return embed_tokens @register_model_architecture('model_parallel_transformer_lm', 'transformer_lm_megatron') def transformer_lm_megatron(args): args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 3072) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 3072 * 4) args.decoder_layers = getattr(args, 'decoder_layers', 72) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 32) args.dropout = getattr(args, 'dropout', 0.1) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.activation_fn = getattr(args, 'activation_fn', 'gelu') base_lm_architecture(args) @register_model_architecture('model_parallel_transformer_lm', 'transformer_lm_megatron_11b') def transformer_lm_megatron_11b(args): args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 3072) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 3072 * 6) args.decoder_layers = getattr(args, 'decoder_layers', 72) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 32) args.dropout = getattr(args, 'dropout', 0.1) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.activation_fn = getattr(args, 'activation_fn', 'gelu') base_lm_architecture(args)
3,752
41.168539
116
py
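A rough back-of-the-envelope check of why the second architecture above is tagged 11b: with an embedding dimension of 3072, an FFN dimension of 6 * 3072 and 72 decoder layers, the weight matrices alone come to roughly eleven billion parameters. Biases and layer norms are ignored, and the 50k vocabulary below is an assumed round number for illustration.

d = 3072                  # decoder_embed_dim
ffn = 6 * d               # decoder_ffn_embed_dim in the 11b config
layers = 72               # decoder_layers
vocab = 50_000            # assumed vocabulary size, illustration only

attn_per_layer = 4 * d * d        # q, k, v and output projections
ffn_per_layer = 2 * d * ffn       # fc1 and fc2
per_layer = attn_per_layer + ffn_per_layer

total = layers * per_layer + vocab * d  # plus the (shared) embedding table
print(f"{per_layer / 1e6:.0f}M per layer, {total / 1e9:.2f}B in total")
# 151M per layer, 11.03B in total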
RegularizedBN
RegularizedBN-main/fairseq/model_parallel/models/roberta/model.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ RoBERTa: A Robustly Optimized BERT Pretraining Approach. """ import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoder, register_model, register_model_architecture, ) from fairseq.models.roberta import ( RobertaModel, RobertaEncoder, RobertaLMHead, RobertaClassificationHead, ) from fairseq.modules import ( LayerNorm, TransformerSentenceEncoder, ) from fairseq.model_parallel.modules import ( ModelParallelTransformerSentenceEncoder, ) from fairseq.modules.transformer_sentence_encoder import init_bert_params try: from fairseq.model_parallel.megatron.mpu import ( copy_to_model_parallel_region, gather_from_model_parallel_region, ColumnParallelLinear, RowParallelLinear, ) has_megatron_submodule = True except (ImportError, ModuleNotFoundError): has_megatron_submodule = False logger = logging.getLogger(__name__) @register_model('model_parallel_roberta') class ModelParallelRobertaModel(RobertaModel): def __init__(self, args, encoder): super().__init__(args, encoder) self.classification_heads = nn.ModuleDict() @staticmethod def add_args(parser): super(ModelParallelRobertaModel, ModelParallelRobertaModel).add_args(parser) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present base_architecture(args) if not hasattr(args, 'max_positions'): args.max_positions = args.tokens_per_sample encoder = ModelParallelRobertaEncoder(args, task.source_dictionary) return cls(args, encoder) def forward(self, src_tokens, features_only=False, return_all_hiddens=False, classification_head_name=None, **kwargs): if classification_head_name is not None: features_only = True x, extra = self.encoder(src_tokens, features_only, return_all_hiddens, **kwargs) if classification_head_name is not None: x = self.classification_heads[classification_head_name](x) return x, extra def register_classification_head(self, name, num_classes=None, inner_dim=None, **kwargs): """Register a classification head.""" if name in self.classification_heads: prev_num_classes = self.classification_heads[name].out_proj.out_features prev_inner_dim = self.classification_heads[name].dense.out_features if num_classes != prev_num_classes or inner_dim != prev_inner_dim: logger.warning( 're-registering head "{}" with num_classes {} (prev: {}) ' 'and inner_dim {} (prev: {})'.format( name, num_classes, prev_num_classes, inner_dim, prev_inner_dim ) ) self.classification_heads[name] = ModelParallelRobertaClassificationHead( self.args.encoder_embed_dim, inner_dim or self.args.encoder_embed_dim, num_classes, self.args.pooler_activation_fn, self.args.pooler_dropout, ) class ModelParallelRobertaLMHead(nn.Module): """Head for masked language modeling.""" def __init__(self, embed_dim, output_dim, activation_fn, weight=None): super().__init__() self.dense = ColumnParallelLinear(embed_dim, embed_dim, gather_output=True) self.activation_fn = utils.get_activation_fn(activation_fn) self.layer_norm = LayerNorm(embed_dim) if weight is None: weight = nn.Linear(embed_dim, output_dim, bias=False).weight self.weight = weight self.bias = nn.Parameter(torch.zeros(output_dim)) def forward(self, features, masked_tokens=None, **kwargs): # Only project the unmasked tokens while training, # saves both memory and computation if masked_tokens is not None: 
features = features[masked_tokens, :] x = self.dense(features) x = self.activation_fn(x) x = self.layer_norm(x) features = copy_to_model_parallel_region(features) # project back to size of vocabulary with bias x = F.linear(x, self.weight) x = gather_from_model_parallel_region(x).contiguous() x = x + self.bias return x class ModelParallelRobertaClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, input_dim, inner_dim, num_classes, activation_fn, pooler_dropout): super().__init__() self.dense = ColumnParallelLinear(input_dim, inner_dim, gather_output=True) self.activation_fn = utils.get_activation_fn(activation_fn) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = self.activation_fn(x) x = self.dropout(x) x = self.out_proj(x) return x class ModelParallelRobertaEncoder(FairseqEncoder): """RoBERTa encoder. Implements the :class:`~fairseq.models.FairseqDecoder` interface required by :class:`~fairseq.models.FairseqLanguageModel`. """ def __init__(self, args, dictionary): super().__init__(dictionary) self.args = args # RoBERTa is a sentence encoder model, so users will intuitively trim # encoder layers. However, the implementation uses the fairseq decoder, # so we fix here. if args.encoder_layers_to_keep: args.encoder_layers = len(args.encoder_layers_to_keep.split(",")) args.decoder_layers_to_keep = args.encoder_layers_to_keep args.encoder_layers_to_keep = None self.sentence_encoder = ModelParallelTransformerSentenceEncoder( padding_idx=dictionary.pad(), vocab_size=len(dictionary), num_encoder_layers=args.encoder_layers, embedding_dim=args.encoder_embed_dim, ffn_embedding_dim=args.encoder_ffn_embed_dim, num_attention_heads=args.encoder_attention_heads, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, layerdrop=args.encoder_layerdrop, max_seq_len=args.max_positions, num_segments=0, encoder_normalize_before=False, apply_bert_init=False, activation_fn=args.activation_fn, ) self.lm_head = ModelParallelRobertaLMHead( embed_dim=args.encoder_embed_dim, output_dim=len(dictionary), activation_fn=args.activation_fn, weight=self.sentence_encoder.embed_tokens.weight, ) def forward(self, src_tokens, features_only=False, return_all_hiddens=False, masked_tokens=None, **unused): """ Args: src_tokens (LongTensor): input tokens of shape `(batch, src_len)` features_only (bool, optional): skip LM head and just return features. If True, the output will be of shape `(batch, src_len, embed_dim)`. return_all_hiddens (bool, optional): also return all of the intermediate hidden states (default: False). Returns: tuple: - the LM output of shape `(batch, src_len, vocab)` - a dictionary of additional data, where 'inner_states' is a list of hidden states. Note that the hidden states have shape `(src_len, batch, vocab)`. 
""" x, extra = self.extract_features(src_tokens, return_all_hiddens=return_all_hiddens) if not features_only: x = self.output_layer(x, masked_tokens=masked_tokens) return x, extra def extract_features(self, src_tokens, return_all_hiddens=False, **unused): inner_states, _ = self.sentence_encoder( src_tokens, last_state_only=not return_all_hiddens, ) features = inner_states[-1].transpose(0, 1) # T x B x C -> B x T x C return features, {'inner_states': inner_states if return_all_hiddens else None} def output_layer(self, features, masked_tokens=None, **unused): return self.lm_head(features, masked_tokens) def max_positions(self): """Maximum output length supported by the encoder.""" return self.args.max_positions @register_model_architecture('model_parallel_roberta', 'model_parallel_roberta') def base_architecture(args): args.encoder_layers = getattr(args, 'encoder_layers', 12) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 3072) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 12) args.activation_fn = getattr(args, 'activation_fn', 'gelu') args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh') args.dropout = getattr(args, 'dropout', 0.1) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.activation_dropout = getattr(args, 'activation_dropout', 0.0) args.pooler_dropout = getattr(args, 'pooler_dropout', 0.0) args.encoder_layers_to_keep = getattr(args, 'encoder_layers_to_keep', None) args.encoder_layerdrop = getattr(args, 'encoder_layerdrop', 0.0) @register_model_architecture('model_parallel_roberta', 'model_parallel_roberta_base') def roberta_base_architecture(args): base_architecture(args) @register_model_architecture('model_parallel_roberta', 'model_parallel_roberta_large') def roberta_large_architecture(args): args.encoder_layers = getattr(args, 'encoder_layers', 24) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) base_architecture(args)
10,367
37.542751
122
py
RegularizedBN
RegularizedBN-main/fairseq/optim/bmuf.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.distributed as dist

from . import FairseqOptimizer


class FairseqBMUF(FairseqOptimizer):
    """
    Implements incremental block distributed data parallelism similar to
    https://ieeexplore.ieee.org/document/7472805

    Paper title: Scalable training of deep learning machines by incremental
    block training with intra-block parallel optimization and blockwise
    model-update filtering
    """

    def __init__(self, args, optimizer):
        super().__init__(args)
        self._optimizer = optimizer
        self._num_updates = 0
        self.sync_iter = self.args.global_sync_iter
        self.block_momentum = self.args.block_momentum
        self.block_lr = self.args.block_lr
        self._reset_local_data()
        self.warmup_iteration = self.args.warmup_iterations
        self.use_nbm = self.args.use_nbm
        self.initial_state = self._optimizer.state_dict()
        self.average_sync = self.args.average_sync
        self.world_size = self.args.distributed_world_size

    @staticmethod
    def add_args(parser):
        """Add optimizer-specific arguments to the parser."""
        parser.add_argument(
            "--block-lr", default=1, type=float, help="block learning rate for bmuf"
        )
        parser.add_argument(
            "--block-momentum",
            default=0.875,
            type=float,
            help="block momentum for bmuf",
        )
        parser.add_argument(
            "--global-sync-iter",
            default=50,
            type=int,
            help="iteration interval for syncing the global model",
        )
        parser.add_argument(
            "--warmup-iterations",
            default=500,
            type=int,
            help="warmup iterations for model to broadcast",
        )
        parser.add_argument(
            "--use-nbm",
            default=False,
            action="store_true",
            help="Specify whether you want to use classical BM / Nesterov BM",
        )
        parser.add_argument(
            "--average-sync",
            default=False,
            action="store_true",
            help="Specify whether you want to average the local momentum after each sync",
        )

    @property
    def optimizer(self):
        return self._optimizer.optimizer

    @property
    def optimizer_config(self):
        return self._optimizer.optimizer_config

    def get_lr(self):
        return self._optimizer.get_lr()

    def set_lr(self, lr):
        self._optimizer.set_lr(lr)

    def state_dict(self):
        return self._optimizer.state_dict()

    def load_state_dict(self, state_dict, optimizer_overrides=None):
        self._optimizer.load_state_dict(state_dict, optimizer_overrides)
        self.initial_state = self._optimizer.state_dict()

    def multiply_grads(self, c):
        """Multiplies grads by a constant *c*."""
        self._optimizer.multiply_grads(c)

    def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
        """Clips gradient norm."""
        return self._optimizer.clip_grad_norm(max_norm, aggregate_norm_fn)

    def average_params(self):
        self._optimizer.average_params()

    def _block_sync(self):
        if self.world_size <= 1:
            return
        # Update the global model using local models from all GPUs
        # (Step-1) Calculate grad between previously synced model and
        # current local model
        if self.block_momentum != 0:
            self._calc_grad()

        # (Step-2) Average gradient from all GPUs
        self._avg_grad_from_all_gpus()

        # (Step-3) Calculate global momentum and update the global model
        if self.block_momentum != 0:
            self._update_global_model()

        # (Step-4) Average local optimizer params
        if self.average_sync:
            self.average_params()

    def _is_warmup_end(self):
        # Check whether the number of training iterations equals the warmup iterations
        if self.get_num_updates() == self.warmup_iteration:
            return True
        return False

    def _is_bmuf_iter(self):
        # Check whether the number of training iterations hits a BMUF sync iteration
        if (self.get_num_updates() > self.warmup_iteration) and (
            self.get_num_updates() % self.sync_iter == 0
        ):
            return True
        return False

    def _warmup_sync(self, root_rank=0):
        if self.world_size <= 1:
            return

        # Broadcast the local model to all gpus
        for param in self.params:
            dist.broadcast(param.data, src=root_rank)

        # Update local optimizer state
        if self.average_sync:
            self._optimizer.average_params()
        else:
            self._optimizer.load_state_dict(self.initial_state)

        self._reset_local_data()

    def step(self, closure=None):
        """Performs a single optimization step."""
        self._optimizer.step(closure)
        self.set_num_updates(self.get_num_updates() + 1)
        if self._is_warmup_end():
            self._warmup_sync()
        elif self._is_bmuf_iter():
            self._block_sync()

    def zero_grad(self):
        """Clears the gradients of all optimized parameters."""
        self._optimizer.zero_grad()

    def get_num_updates(self):
        """Get the number of parameter updates."""
        return self._num_updates

    def set_num_updates(self, num_updates):
        """Set the number of parameter updates."""
        self._num_updates = num_updates

    @torch.no_grad()
    def _reset_local_data(self):
        # (Step-0) Initialize global momentum parameters and store global copy on each gpu
        self.global_params = [torch.zeros_like(p.data) for p in self.params]
        self.smoothed_grads = [p.data.new_zeros(p.data.size()) for p in self.params]
        self.grads = [p.data.new_zeros(p.data.size()) for p in self.params]

        # saving the global model locally for calculating gradient during bmuf sync
        for param, global_param in zip(self.params, self.global_params):
            global_param.copy_(param.data)

    @torch.no_grad()
    def _calc_grad(self):
        # global_params is basically the global copy from the previously finished
        # synchronisation. param.data is the local parameter after block_sync_freq
        # steps on the local gpu, so grad is the difference between the previously
        # synced model and the current local model.
        for index, (param, global_param) in enumerate(
            zip(self.params, self.global_params)
        ):
            self.grads[index] = global_param - param.data

    def _avg_grad_from_all_gpus(self):
        for index, param in enumerate(self.params):
            sync_para = param.data if self.block_momentum == 0 else self.grads[index]
            sync_para /= float(dist.get_world_size())
            dist.all_reduce(sync_para, op=dist.ReduceOp.SUM)

    @torch.no_grad()
    def _update_global_model(self):
        for index, (param, global_param, smoothed_grad, grad) in enumerate(
            zip(
                self.params,
                self.global_params,
                self.smoothed_grads,
                # all gpus would share the same value of smoothed_grad, since it is
                # always computed on synchronized gradients.
                self.grads,
            )
        ):
            # global_param is basically the last synchronized parameter. Though
            # smoothed_grad is local, all processes will have the same value of
            # smoothed_grad and hence param is a globally synchronized copy.
            # smoothed_grad(t) = BM * smoothed_grad(t-1) + BM_lr * grad(t)
            smoothed_grad = self.block_momentum * smoothed_grad + self.block_lr * grad
            param.data.copy_(global_param - smoothed_grad)

            # A Nesterov momentum here is to do a partial weight update before
            # calculating the gradient
            if self.use_nbm:
                param.data.copy_(param.data - self.block_momentum * smoothed_grad)

            # backup for the next synchronization.
            self.smoothed_grads[index] = smoothed_grad
            global_param.copy_(param.data)
8,282
34.857143
90
py
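A single-process sketch of the block-momentum update that _block_sync() above performs after every sync_iter local steps. The names mirror the class, the Step-2 all-reduce is omitted because only one worker is simulated, and the toy tensors are made-up values.

import torch

block_momentum, block_lr, use_nbm = 0.875, 1.0, True

param = torch.tensor([1.0, 2.0])         # local model after some local optimizer steps
global_param = torch.tensor([1.5, 2.5])  # copy saved at the previous sync (_reset_local_data)
smoothed_grad = torch.zeros(2)

# (Step-1) block "gradient": previously synced model minus current local model
grad = global_param - param
# (Step-2) would all-reduce / average grad over workers; skipped for a single worker

# (Step-3) block momentum filtering, then update the global model
smoothed_grad = block_momentum * smoothed_grad + block_lr * grad
param = global_param - smoothed_grad
if use_nbm:  # Nesterov-style partial update before the next block starts
    param = param - block_momentum * smoothed_grad

global_param = param.clone()             # becomes the reference point for the next sync
print(param)                             # tensor([0.5625, 1.5625])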
RegularizedBN
RegularizedBN-main/fairseq/optim/nag.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from torch.optim.optimizer import Optimizer, required from . import FairseqOptimizer, register_optimizer @register_optimizer('nag') class FairseqNAG(FairseqOptimizer): def __init__(self, args, params): super().__init__(args) self._optimizer = NAG(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--momentum', default=0.99, type=float, metavar='M', help='momentum factor') parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { 'lr': self.args.lr[0], 'momentum': self.args.momentum, 'weight_decay': self.args.weight_decay, } class NAG(Optimizer): def __init__(self, params, lr=required, momentum=0, weight_decay=0): defaults = dict(lr=lr, lr_old=lr, momentum=momentum, weight_decay=weight_decay) super(NAG, self).__init__(params, defaults) @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return True def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for group in self.param_groups: weight_decay = group['weight_decay'] momentum = group['momentum'] lr = group['lr'] lr_old = group.get('lr_old', lr) lr_correct = lr / lr_old for p in group['params']: if p.grad is None: continue p_data_fp32 = p.data if p_data_fp32.dtype in {torch.float16, torch.bfloat16}: p_data_fp32 = p_data_fp32.float() d_p = p.grad.data.float() param_state = self.state[p] if 'momentum_buffer' not in param_state: param_state['momentum_buffer'] = torch.zeros_like(d_p) else: param_state['momentum_buffer'] = param_state['momentum_buffer'].to(d_p) buf = param_state['momentum_buffer'] if weight_decay != 0: p_data_fp32.mul_(1 - lr * weight_decay) p_data_fp32.add_(buf, alpha=momentum * momentum * lr_correct) p_data_fp32.add_(d_p, alpha=-(1 + momentum) * lr) buf.mul_(momentum * lr_correct).add_(d_p, alpha=-lr) if p.data.dtype in {torch.float16, torch.bfloat16}: p.data.copy_(p_data_fp32) group['lr_old'] = lr return loss
3,485
32.519231
92
py
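A minimal check that the NAG update above descends on a toy quadratic. The three update lines are transcribed from NAG.step() for the case weight_decay = 0 and lr_old == lr (so lr_correct == 1); momentum uses the parser default of 0.99 and the learning rate is an arbitrary small value.

import torch

lr, momentum = 0.1, 0.99
p = torch.tensor([5.0])   # parameter of the toy loss 0.5 * p**2, whose gradient is p
buf = torch.zeros(1)      # momentum_buffer

for step in range(3):
    d_p = p.clone()                         # gradient of 0.5 * p**2
    p = p + momentum * momentum * buf       # p += momentum^2 * lr_correct * buf
    p = p - (1 + momentum) * lr * d_p       # p -= (1 + momentum) * lr * d_p
    buf = momentum * buf - lr * d_p         # buf = momentum * lr_correct * buf - lr * d_p
    print(step, float(p))                   # |p| shrinks toward 0 step by step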
RegularizedBN
RegularizedBN-main/fairseq/optim/sgd.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.optim from . import FairseqOptimizer, register_optimizer @register_optimizer('sgd') class SGD(FairseqOptimizer): def __init__(self, args, params): super().__init__(args) self._optimizer = torch.optim.SGD(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--momentum', default=0.0, type=float, metavar='M', help='momentum factor') parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { 'lr': self.args.lr[0], 'momentum': self.args.momentum, 'weight_decay': self.args.weight_decay, } @property def supports_flat_params(self): return True
1,430
31.522727
92
py
RegularizedBN
RegularizedBN-main/fairseq/optim/radam.py
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import math import types import torch import torch.optim # from ipdb import set_trace from fairseq.optim import FairseqOptimizer, register_optimizer # from tensorboardX import SummaryWriter # # writer = SummaryWriter(logdir='./log/wmt/') # writer = SummaryWriter(logdir='./log/ada/') iter_idx = 0 @register_optimizer('radam') class FairseqRAdam(FairseqOptimizer): def __init__(self, args, params): #super().__init__(args, params) super().__init__(args) self._optimizer = RAdam(params, **self.optimizer_config) self._optimizer.name = args.tb_tag + '_' + self._optimizer.name @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--adam-betas', default='(0.9, 0.999)', metavar='B', help='betas for Adam optimizer') parser.add_argument('--adam-eps', type=float, default=1e-8, metavar='D', help='epsilon for Adam optimizer') parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') parser.add_argument('--tb-tag', default="", type=str, help='tb tag') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { 'lr': self.args.lr[0], 'betas': eval(self.args.adam_betas), 'eps': self.args.adam_eps, 'weight_decay': self.args.weight_decay, } class RAdam(torch.optim.Optimizer): def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False): defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad) self.name = '{}_{}_{}'.format(lr, betas[0], betas[1]) super(RAdam, self).__init__(params, defaults) @property def supports_memory_efficient_fp16(self): return True def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" global iter_idx iter_idx += 1 grad_list = list() mom_list = list() mom_2rd_list = list() assert 'adam_1k' not in self.name writer_iter = iter_idx loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data.float() if grad.is_sparse: raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') amsgrad = group['amsgrad'] p_data_fp32 = p.data.float() state = self.state[p] if len(state) == 0: state['step'] = 0 state['exp_avg'] = torch.zeros_like(p_data_fp32) state['exp_avg_sq'] = torch.zeros_like(p_data_fp32) else: state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32) state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] beta1, beta2 = group['betas'] exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) exp_avg.mul_(beta1).add_(1 - beta1, grad) state['step'] += 1 beta2_t = beta2 ** state['step'] N_sma_max = 2 / (1 - beta2) - 1 N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) if group['weight_decay'] != 0: p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32) # more conservative since it's an approximated value if N_sma >= 5: step_size = group['lr'] * math.sqrt((1 - beta2_t ) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) * (N_sma_max) / N_sma / (N_sma_max - 2)) / (1 - beta1 ** state['step']) denom = exp_avg_sq.sqrt().add_(group['eps']) p_data_fp32.addcdiv_(-step_size, exp_avg, denom) else: step_size = group['lr'] / (1 - beta1 ** state['step']) p_data_fp32.add_(-step_size, exp_avg) p.data.copy_(p_data_fp32) # if writer_iter > 0 and writer_iter % 300 == 0 or writer_iter in [1, 5, 10, 25, 50, 75, 100, 150, 200]: # grad_list.extend( grad.abs().add_(1e-9).log().view(-1).tolist() ) # mom_list.extend( exp_avg.abs().add_(1e-9).log().view(-1).tolist() ) # mom_2rd_list.extend( exp_avg_sq.abs().add_(1e-9).log().view(-1).tolist() ) # if writer_iter > 0 and writer_iter % 300 == 0 or writer_iter in [1, 5, 10, 25, 50, 75, 100, 150, 200]: # writer.add_histogram('grad/{}'.format(self.name), grad_list, writer_iter) # writer.add_histogram('mom/{}'.format(self.name), mom_list, writer_iter) # writer.add_histogram('mom_sq/{}'.format(self.name), mom_2rd_list, writer_iter) return loss
5,993
38.695364
188
py
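A short numeric look at the rectification logic in RAdam.step() above: for beta2 = 0.999 the approximated SMA length N_sma stays below 5 for the first handful of updates, so those steps fall back to a plain (un-adapted) momentum update before the variance-rectified Adam step kicks in. Plain Python, using the same formulas as the optimizer.

import math

beta1, beta2, lr = 0.9, 0.999, 1e-3
N_sma_max = 2 / (1 - beta2) - 1            # 1999 for beta2 = 0.999

for step in (1, 2, 3, 4, 5, 6, 100):
    beta2_t = beta2 ** step
    N_sma = N_sma_max - 2 * step * beta2_t / (1 - beta2_t)
    if N_sma >= 5:                         # rectified, variance-adapted step
        rect = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4)
                         * (N_sma - 2) * N_sma_max / N_sma / (N_sma_max - 2))
        step_size = lr * rect / (1 - beta1 ** step)
        kind = "rectified Adam step"
    else:                                  # early steps: momentum-only update
        step_size = lr / (1 - beta1 ** step)
        kind = "plain momentum step"
    print(f"step {step}: {kind}, N_sma = {N_sma:.2f}")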
RegularizedBN
RegularizedBN-main/fairseq/optim/adamax.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.optim from . import FairseqOptimizer, register_optimizer @register_optimizer('adamax') class FairseqAdamax(FairseqOptimizer): def __init__(self, args, params): super().__init__(args) self._optimizer = Adamax(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--adamax-betas', default='(0.9, 0.999)', metavar='B', help='betas for Adam optimizer') parser.add_argument('--adamax-eps', type=float, default=1e-8, metavar='D', help='epsilon for Adam optimizer') parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') parser.add_argument('--no-bias-correction', default=False, action='store_true', help='disable bias correction') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { 'lr': self.args.lr[0], 'betas': eval(self.args.adamax_betas), 'eps': self.args.adamax_eps, 'weight_decay': self.args.weight_decay, 'bias_correction': not self.args.no_bias_correction, } class Adamax(torch.optim.Optimizer): """Implements Adamax algorithm (a variant of Adam based on infinity norm). It has been proposed in `Adam: A Method for Stochastic Optimization`__. Compared to the version in PyTorch, this version implements a fix for weight decay. Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 2e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) bias_correction (bool, optional): enable bias correction (default: True) __ https://arxiv.org/abs/1412.6980 """ def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, bias_correction=True): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) if not 0.0 <= weight_decay: raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, bias_correction=bias_correction) super(Adamax, self).__init__(params, defaults) @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return True def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data.float() if grad.is_sparse: raise RuntimeError('Adamax does not support sparse gradients') p_data_fp32 = p.data if p.data.dtype in {torch.float16, torch.bfloat16}: p_data_fp32 = p_data_fp32.float() state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 state['exp_avg'] = torch.zeros_like(p_data_fp32) state['exp_inf'] = torch.zeros_like(p_data_fp32) else: state['exp_avg'] = state['exp_avg'].to(p_data_fp32) state['exp_inf'] = state['exp_inf'].to(p_data_fp32) exp_avg, exp_inf = state['exp_avg'], state['exp_inf'] beta1, beta2 = group['betas'] eps = group['eps'] state['step'] += 1 # Update biased first moment estimate. exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) # Update the exponentially weighted infinity norm. torch.max( exp_inf.mul_(beta2), grad.abs_(), out=exp_inf, ) step_size = group['lr'] if group['bias_correction']: bias_correction = 1 - beta1 ** state['step'] step_size /= bias_correction if group['weight_decay'] != 0: p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr']) p_data_fp32.addcdiv_(exp_avg, exp_inf.add(eps), value=-step_size) if p.data.dtype in {torch.float16, torch.bfloat16}: p.data.copy_(p_data_fp32) return loss
6,084
37.27044
93
py
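A scalar walk-through of the Adamax update implemented above: the usual second-moment average is replaced by an exponentially weighted infinity norm, so the denominator is a decayed running maximum of |grad| rather than a mean of squared gradients. The gradient sequence is made up; hyperparameters are the defaults from the file.

import torch

lr, (beta1, beta2), eps = 2e-3, (0.9, 0.999), 1e-8
p = torch.tensor([1.0])
exp_avg = torch.zeros(1)   # first moment
exp_inf = torch.zeros(1)   # exponentially weighted infinity norm

for step, grad in enumerate([torch.tensor([0.5]),
                             torch.tensor([-2.0]),
                             torch.tensor([0.1])], start=1):
    exp_avg = beta1 * exp_avg + (1 - beta1) * grad     # biased first moment estimate
    exp_inf = torch.max(beta2 * exp_inf, grad.abs())   # decayed running max of |grad|
    step_size = lr / (1 - beta1 ** step)               # bias correction enabled
    p = p - step_size * exp_avg / (exp_inf + eps)      # same form as p.addcdiv_ in the file
    print(step, float(p), float(exp_inf))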
RegularizedBN
RegularizedBN-main/fairseq/optim/fp16_optimizer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from itertools import chain import torch from fairseq import optim, utils from .dynamic_loss_scaler import DynamicLossScaler class _FP16OptimizerMixin(object): def __init__(self, *args, **kwargs): # forward __init__ call to the next class in mro(method resolution order) super().__init__(*args, **kwargs) @property def has_flat_params(self): return torch.is_tensor(self.fp32_params) @classmethod def build_fp32_params(cls, params, flatten=True): # create FP32 copy of parameters and grads if flatten: total_param_size = sum(p.data.numel() for p in params) fp32_params = torch.zeros(total_param_size, dtype=torch.float, device=params[0].device) offset = 0 for p in params: numel = p.data.numel() fp32_params[offset:offset+numel].copy_(p.data.view(-1)) offset += numel fp32_params = torch.nn.Parameter(fp32_params) fp32_params.grad = fp32_params.data.new(total_param_size) return fp32_params else: fp32_params = [] for p in params: p32 = torch.nn.Parameter(p.data.float()) p32.grad = torch.zeros_like(p32.data) fp32_params.append(p32) return fp32_params def state_dict(self): """Return the optimizer's state dict.""" state_dict = self.fp32_optimizer.state_dict() if self.scaler is not None: state_dict['loss_scale'] = self.scaler.loss_scale return state_dict def load_state_dict(self, state_dict, optimizer_overrides=None): """Load an optimizer state dict. In general we should prefer the configuration of the existing optimizer instance (e.g., learning rate) over that found in the state_dict. This allows us to resume training from a checkpoint using a new set of optimizer args. """ if 'loss_scale' in state_dict and self.scaler is not None: self.scaler.loss_scale = state_dict['loss_scale'] self.fp32_optimizer.load_state_dict(state_dict, optimizer_overrides) def backward(self, loss): """Computes the sum of gradients of the given tensor w.r.t. graph leaves. Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this function additionally dynamically scales the loss to avoid gradient underflow. """ if self.scaler is not None: loss = self.scaler.scale(loss) loss.backward() self._needs_sync = True def _sync_fp16_grads_to_fp32(self): if self._needs_sync: # copy FP16 grads to FP32 if self.has_flat_params: offset = 0 for p in self.fp16_params: if not p.requires_grad: continue grad_data = p.grad.data if p.grad is not None else p.data.new_zeros(p.data.shape) numel = grad_data.numel() self.fp32_params.grad.data[offset:offset+numel].copy_(grad_data.view(-1)) offset += numel else: for p, p32 in zip(self.fp16_params, self.fp32_params): if not p.requires_grad: continue if p.grad is not None: p32.grad.data.copy_(p.grad.data) else: p32.grad = torch.zeros_like(p.data, dtype=torch.float) self._needs_sync = False def _sync_fp32_params_to_fp16(self): # copy FP32 params back into FP16 model if self.has_flat_params: offset = 0 for p in self.fp16_params: if not p.requires_grad: continue numel = p.data.numel() p.data.copy_(self.fp32_params.data[offset:offset+numel].view_as(p.data)) offset += numel else: for p, p32 in zip(self.fp16_params, self.fp32_params): if not p.requires_grad: continue p.data.copy_(p32.data) def _unscale_grads(self): self._sync_fp16_grads_to_fp32() if self._multiply_factor != 1.: self.fp32_optimizer.multiply_grads(self._multiply_factor) self._multiply_factor = 1. 
def multiply_grads(self, c): """Multiplies grads by a constant ``c``.""" self._multiply_factor *= c def clip_grad_norm(self, max_norm, aggregate_norm_fn=None): """Clips gradient norm and updates dynamic loss scaler.""" self._sync_fp16_grads_to_fp32() grad_norm = self._multiply_factor * self.fp32_optimizer.clip_grad_norm(0, aggregate_norm_fn) if self.scaler is not None: if grad_norm > max_norm > 0.0: self._multiply_factor *= max_norm / grad_norm self.scaler.check_overflow(grad_norm) else: clip_coef = (max_norm / (grad_norm + 1e-6)).clamp_(max=1) self._multiply_factor *= clip_coef return grad_norm def step(self, closure=None): """Performs a single optimization step.""" self._sync_fp16_grads_to_fp32() if getattr(self, 'supports_step_with_scale', False): self.fp32_optimizer.step(closure, scale=(1. / self._multiply_factor)) else: self._unscale_grads() self.fp32_optimizer.step(closure) if self.scaler is not None: self.scaler.update() self._sync_fp32_params_to_fp16() def zero_grad(self): """Clears the gradients of all optimized parameters.""" for p in self.fp16_params: p.grad = None if self.has_flat_params: self.fp32_params.grad.zero_() else: for p32 in self.fp32_params: p32.grad.zero_() self._needs_sync = False if self.scaler is not None: self._multiply_factor = 1. / float(self.scaler.loss_scale) class FP16Optimizer(_FP16OptimizerMixin, optim.FairseqOptimizer): """ Wrap an *optimizer* to support FP16 (mixed precision) training. """ def __init__(self, args, params, fp32_optimizer, fp32_params): super().__init__(args) self.fp16_params = params self.fp32_optimizer = fp32_optimizer self.fp32_params = fp32_params if getattr(args, 'fp16_scale_window', None) is None: if len(args.update_freq) > 1: raise ValueError( '--fp16-scale-window must be given explicitly when using a ' 'custom --update-freq schedule' ) data_parallel_size = int(args.distributed_world_size / args.model_parallel_size) scale_window = int(2**14 / data_parallel_size / args.update_freq[0]) else: scale_window = args.fp16_scale_window if not getattr(args, 'bf16', False): self.scaler = DynamicLossScaler( init_scale=args.fp16_init_scale, scale_window=scale_window, tolerance=args.fp16_scale_tolerance, threshold=args.threshold_loss_scale, min_loss_scale=args.min_loss_scale ) else: # disable loss scaling for bfloat16 self.scaler = None @classmethod def build_optimizer(cls, args, params): """ Args: args (argparse.Namespace): fairseq args params (iterable): iterable of parameters to optimize """ flatten = not getattr(args, 'fp16_no_flatten_grads', False) if getattr(args, 'bf16', False): flatten = False # mixed precision is faster on TPUs without flat grads fp32_params = cls.build_fp32_params(params, flatten=flatten) if flatten: fp32_optimizer = optim.build_optimizer(args, [fp32_params]) else: fp32_optimizer = optim.build_optimizer(args, fp32_params) if flatten and not fp32_optimizer.supports_flat_params: raise RuntimeError( 'chosen optimizer does not support flat params, ' 'please set --fp16-no-flatten-grads' ) return cls(args, params, fp32_optimizer, fp32_params) @property def optimizer(self): return self.fp32_optimizer.optimizer @property def optimizer_config(self): return self.fp32_optimizer.optimizer_config def get_lr(self): return self.fp32_optimizer.get_lr() def set_lr(self, lr): self.fp32_optimizer.set_lr(lr) class _MemoryEfficientFP16OptimizerMixin(object): def __init__(self, *args, **kwargs): # forward __init__ call to the next class in MRO (method resolution order) super().__init__(*args, **kwargs) self._multiply_factor = 1. 
@property def has_flat_params(self): return False def state_dict(self): """Return the optimizer's state dict.""" state_dict = self.wrapped_optimizer.state_dict() if self.scaler is not None: state_dict['loss_scale'] = self.scaler.loss_scale return state_dict def load_state_dict(self, state_dict, optimizer_overrides=None): """Load an optimizer state dict. In general we should prefer the configuration of the existing optimizer instance (e.g., learning rate) over that found in the state_dict. This allows us to resume training from a checkpoint using a new set of optimizer args. """ if 'loss_scale' in state_dict and self.scaler is not None: self.scaler.loss_scale = state_dict['loss_scale'] self.wrapped_optimizer.load_state_dict(state_dict, optimizer_overrides) # Hack: PyTorch automatically casts the optimizer state to match the # type of the current parameters. But with --memory-efficient-fp16 the # params are FP16 while the optimizer state is FP32 and we don't want # to cast. A workaround is to manually copy back the original state # after the optimizer has been loaded. groups = self.optimizer.param_groups saved_groups = state_dict['param_groups'] id_map = { old_id: p for old_id, p in zip( chain(*(g['params'] for g in saved_groups)), chain(*(g['params'] for g in groups)) ) } for k, v in state_dict['state'].items(): if k in id_map: param = id_map[k] self.optimizer.state[param] = v def backward(self, loss): """Computes the sum of gradients of the given tensor w.r.t. graph leaves. Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this function additionally dynamically scales the loss to avoid gradient underflow. """ if self.scaler is not None: loss = self.scaler.scale(loss) loss.backward() def _unscale_grads(self): if self._multiply_factor != 1.: self.wrapped_optimizer.multiply_grads(self._multiply_factor) self._multiply_factor = 1. def multiply_grads(self, c): """Multiplies grads by a constant *c*.""" self._multiply_factor *= c def clip_grad_norm(self, max_norm, aggregate_norm_fn=None): """Clips gradient norm and updates dynamic loss scaler.""" max_norm = float(max_norm) grad_norm = self._multiply_factor * self.wrapped_optimizer.clip_grad_norm(0, aggregate_norm_fn) if self.scaler is not None: grad_norm_cpu = float(grad_norm) if grad_norm_cpu > max_norm > 0.: self._multiply_factor *= max_norm / grad_norm_cpu # detect overflow and adjust loss scale self.scaler.check_overflow(grad_norm_cpu) else: clip_coef = (max_norm / (grad_norm + 1e-6)).clamp_(max=1) self._multiply_factor *= clip_coef return grad_norm def step(self, closure=None): """Performs a single optimization step.""" if getattr(self, 'supports_step_with_scale', False): # NOTE(msb) optimizer divides by scale factor self.wrapped_optimizer.step(closure, scale=(1. / self._multiply_factor)) else: self._unscale_grads() self.wrapped_optimizer.step(closure) if self.scaler is not None: self.scaler.update() def zero_grad(self): """Clears the gradients of all optimized parameters.""" self.wrapped_optimizer.zero_grad() if self.scaler is not None: self._multiply_factor = 1. / float(self.scaler.loss_scale) class MemoryEfficientFP16Optimizer(_MemoryEfficientFP16OptimizerMixin, optim.FairseqOptimizer): """ Wrap an *optimizer* to support FP16 (mixed precision) training. Compared to :class:`fairseq.optim.FP16Optimizer`, this version does not maintain an FP32 copy of the model. We instead expect the optimizer to convert the gradients to FP32 internally and sync the results back to the FP16 model params. 
This significantly reduces memory usage but slightly increases the time spent in the optimizer. Since this wrapper depends on specific functionality in the wrapped optimizer (i.e., on-the-fly conversion of grads to FP32), only certain optimizers can be wrapped. This is determined by the *supports_memory_efficient_fp16* property. """ def __init__(self, args, params, optimizer): if not optimizer.supports_memory_efficient_fp16: raise ValueError( 'Unsupported optimizer: {}'.format(optimizer.__class__.__name__) ) super().__init__(args) self.wrapped_optimizer = optimizer if getattr(args, 'fp16_scale_window', None) is None: if len(args.update_freq) > 1: raise ValueError( '--fp16-scale-window must be given explicitly when using a ' 'custom --update-freq schedule' ) data_parallel_size = int(args.distributed_world_size / args.model_parallel_size) scale_window = 2**14 / data_parallel_size / args.update_freq[0] else: scale_window = args.fp16_scale_window if not getattr(args, 'bf16', False): self.scaler = DynamicLossScaler( init_scale=args.fp16_init_scale, scale_window=scale_window, tolerance=args.fp16_scale_tolerance, threshold=args.threshold_loss_scale, min_loss_scale=args.min_loss_scale ) else: # disable loss scaling for bfloat16 self.scaler = None @classmethod def build_optimizer(cls, args, params): """ Args: args (argparse.Namespace): fairseq args params (iterable): iterable of parameters to optimize """ fp16_optimizer = optim.build_optimizer(args, params) return cls(args, params, fp16_optimizer) @property def optimizer(self): return self.wrapped_optimizer.optimizer @property def optimizer_config(self): return self.wrapped_optimizer.optimizer_config def get_lr(self): return self.wrapped_optimizer.get_lr() def set_lr(self, lr): self.wrapped_optimizer.set_lr(lr)
15,719
36.163121
103
py
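The FP16 wrappers in the file above fold loss-scale removal and gradient clipping into a single deferred multiplicative factor that touches the gradients only once, at step time. A minimal sketch of that idea on plain PyTorch parameters (not the fairseq classes themselves; the function name is illustrative):

import torch

def deferred_unscale_and_clip(params, loss_scale, max_norm):
    # Gradients currently hold d(loss * loss_scale)/dw. Instead of rewriting every
    # gradient twice, accumulate one factor (1/scale, then the clip coefficient)
    # and apply it in a single pass, as the mixins above do with _multiply_factor.
    multiply_factor = 1.0 / loss_scale
    grads = [p.grad for p in params if p.grad is not None]
    grad_norm = multiply_factor * torch.norm(torch.stack([g.norm() for g in grads]))
    if max_norm > 0 and grad_norm > max_norm:
        multiply_factor *= max_norm / float(grad_norm)
    for g in grads:
        g.mul_(multiply_factor)
    return grad_norm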
RegularizedBN
RegularizedBN-main/fairseq/optim/adam.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import math import types import torch import torch.optim import torch.distributed as dist from fairseq.optim import FairseqOptimizer, register_optimizer from fairseq.optim.fused_adam import get_fused_adam_class logger = logging.getLogger(__name__) @register_optimizer('adam') class FairseqAdam(FairseqOptimizer): """Adam optimizer for fairseq. Important note: this optimizer corresponds to the "AdamW" variant of Adam in its weight decay behavior. As such, it is most closely analogous to torch.optim.AdamW from PyTorch. """ def __init__(self, args, params): super().__init__(args) fused_adam_cls = get_fused_adam_class() use_fused_adam = ( not getattr(args, 'use_old_adam', False) and fused_adam_cls is not None and torch.cuda.is_available() ) if getattr(args, 'tpu', False): # on TPUs we use the Adam defined here, since it # automatically casts gradients to FP32 self._optimizer = Adam(params, **self.optimizer_config) elif use_fused_adam: logger.info('using FusedAdam') self._optimizer = fused_adam_cls(params, **self.optimizer_config) else: #print("call here!"); indeed call here #print(params); self._optimizer = Adam(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--adam-betas', default='(0.9, 0.999)', metavar='B', help='betas for Adam optimizer') parser.add_argument('--adam-eps', type=float, default=1e-8, metavar='D', help='epsilon for Adam optimizer') parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') # Maintain backward compatibility with old checkpoints that have stored # optimizer state as fairseq.optim.adam.Adam. parser.add_argument( "--use-old-adam", action='store_true', default=False, help="Use fairseq.optim.adam.Adam", ) # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { 'lr': self.args.lr[0], 'betas': eval(self.args.adam_betas), 'eps': self.args.adam_eps, 'weight_decay': self.args.weight_decay, } def average_params(self): """Reduce Params is only used during BMUF distributed training.""" state_dict = self.optimizer.state_dict() total_gpus = float(dist.get_world_size()) for _, value in state_dict["state"].items(): value["exp_avg"] /= total_gpus value["exp_avg_sq"] /= total_gpus dist.all_reduce(value["exp_avg"], op=dist.ReduceOp.SUM) dist.all_reduce(value["exp_avg_sq"], op=dist.ReduceOp.SUM) class Adam(torch.optim.Optimizer): """Implements Adam algorithm. This implementation is modified from torch.optim.Adam based on: `Fixed Weight Decay Regularization in Adam` (see https://arxiv.org/abs/1711.05101) It has been proposed in `Adam: A Method for Stochastic Optimization`_. 
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_

    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, amsgrad=False):
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad)
        super(Adam, self).__init__(params, defaults)

    @property
    def supports_memory_efficient_fp16(self):
        return True

    @property
    def supports_flat_params(self):
        return True

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.dtype in {torch.float16, torch.bfloat16}:
                    grad = grad.float()
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']

                p_data_fp32 = p.data
                if p.data.dtype in {torch.float16, torch.bfloat16}:
                    p_data_fp32 = p_data_fp32.float()

                state = self.state[p]  # per-parameter state dict with accumulated gradient statistics (first and second moments)

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].to(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].to(p_data_fp32)
                    if amsgrad:
                        state['max_exp_avg_sq'] = state['max_exp_avg_sq'].to(p_data_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                # Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])

                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1

                if group['weight_decay'] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])

                p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)

                if p.data.dtype in {torch.float16, torch.bfloat16}:
                    p.data.copy_(p_data_fp32)

        return loss
8,497
39.084906
116
py
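For reference, the update performed inside Adam.step() above, restated as a single functional step with decoupled (AdamW-style) weight decay. This is a condensed sketch, not part of the file; the function name is illustrative:

import torch

def adamw_step(p, grad, exp_avg, exp_avg_sq, step, lr=1e-3,
               betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0):
    beta1, beta2 = betas
    exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)               # first moment EMA
    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)  # second moment EMA
    bias_correction1 = 1 - beta1 ** step
    bias_correction2 = 1 - beta2 ** step
    step_size = lr * (bias_correction2 ** 0.5) / bias_correction1
    denom = exp_avg_sq.sqrt().add_(eps)
    if weight_decay != 0:
        p.add_(p, alpha=-weight_decay * lr)   # decay applied to the weights, not folded into the gradient
    p.addcdiv_(exp_avg, denom, value=-step_size)
    return p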
RegularizedBN
RegularizedBN-main/fairseq/optim/adafactor.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.optim from . import FairseqOptimizer, register_optimizer @register_optimizer('adafactor') class FairseqAdafactor(FairseqOptimizer): def __init__(self, args, params): super().__init__(args) self._optimizer = Adafactor(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--adafactor-eps', default='(1e-30, 1e-3)', metavar="E", help='epsilons for Adafactor optimizer') parser.add_argument('--clip-threshold', type=float, default=1.0, metavar="C", help='threshold for clipping update root mean square') parser.add_argument('--decay-rate', type=float, default=-0.8, metavar="D", help='decay rate of the second moment estimator') parser.add_argument('--beta1', type=float, default=None, metavar="B", help='beta for first moment estimator. Optional') parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') parser.add_argument('--scale-parameter', action='store_true', help='scale learning rate by root mean square of parameter') parser.add_argument('--relative-step', action='store_true', help='set learning rate to inverse square root of timestep,' 'otherwise use external learning rate') parser.add_argument('--warmup-init', action='store_true', help='use relative step for warm-up learning rate schedule') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. Note : Convergence issues empirically observed with fp16 on. Might require search for appropriate configuration. """ return { 'lr': self.args.lr[0], 'eps': eval(self.args.adafactor_eps), 'clip_threshold': self.args.clip_threshold, 'decay_rate': self.args.decay_rate, 'beta1': self.args.beta1, 'weight_decay': self.args.weight_decay, 'scale_parameter': self.args.scale_parameter, # defaults to False 'relative_step': self.args.relative_step, # defaults to False 'warmup_init': self.args.warmup_init, } class Adafactor(torch.optim.Optimizer): """Implements Adafactor algorithm. This implementation is based on: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost` (see https://arxiv.org/abs/1804.04235) Note that this optimizer internally adjusts the learning rate depending on the *scale_parameter*, *relative_step* and *warmup_init* options. To use a manual (external) learning rate schedule you should set `scale_parameter=False` and `relative_step=False`. 
Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): external learning rate (default: None) eps (tuple[float, float]): regularization constans for square gradient and parameter scale respectively (default: (1e-30, 1e-3)) clip_threshold (float): threshold of root mean square of final gradient update (default: 1.0) decay_rate (float): coefficient used to compute running averages of square gradient (default: -0.8) beta1 (float): coefficient used for computing running averages of gradient (default: None) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) scale_parameter (bool): if True, learning rate is scaled by root mean square of parameter (default: True) relative_step (bool): if True, time-dependent learning rate is computed instead of external learning rate (default: True) warmup_init (bool): time-dependent learning rate computation depends on whether warm-up initialization is being used (default: False) """ def __init__(self, params, lr=None, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, scale_parameter=True, relative_step=True, warmup_init=False): if lr is not None and relative_step: raise ValueError('Cannot combine manual lr and relative_step options') if warmup_init and not relative_step: raise ValueError('warmup_init requires relative_step=True') defaults = dict(lr=lr, eps=eps, clip_threshold=clip_threshold, decay_rate=decay_rate, beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter, relative_step=relative_step, warmup_init=warmup_init) super(Adafactor, self).__init__(params, defaults) @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return False def _get_lr(self, param_group, param_state): rel_step_sz = param_group['lr'] if param_group['relative_step']: min_step = 1e-6 * param_state['step'] if param_group['warmup_init'] else 1e-2 rel_step_sz = min(min_step, 1.0/math.sqrt(param_state['step'])) param_scale = 1.0 if param_group['scale_parameter']: param_scale = max(param_group['eps'][1], param_state['RMS']) return param_scale * rel_step_sz def _get_options(self, param_group, param_shape): factored = len(param_shape) >= 2 use_first_moment = param_group['beta1'] is not None return factored, use_first_moment def _rms(self, tensor): return tensor.norm(2) / (tensor.numel() ** 0.5) def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col): r_factor = ( exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True) ).rsqrt_() c_factor = exp_avg_sq_col.rsqrt() return torch.mm(r_factor.unsqueeze(-1), c_factor.unsqueeze(0)) def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data if grad.dtype in {torch.float16, torch.bfloat16}: grad = grad.float() if grad.is_sparse: raise RuntimeError('Adafactor does not support sparse gradients.') state = self.state[p] grad_shape = grad.shape factored, use_first_moment = self._get_options(group, grad_shape) # State Initialization if len(state) == 0: state['step'] = 0 if use_first_moment: # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(grad) if factored: state['exp_avg_sq_row'] = torch.zeros(grad_shape[:-1]).to(grad) state['exp_avg_sq_col'] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad) else: state['exp_avg_sq'] = torch.zeros_like(grad) state['RMS'] = 0 else: if use_first_moment: state['exp_avg'] = state['exp_avg'].to(grad) if factored: state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad) state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad) else: state['exp_avg_sq'] = state['exp_avg_sq'].to(grad) p_data_fp32 = p.data if p.data.dtype in {torch.float16, torch.bfloat16}: p_data_fp32 = p_data_fp32.float() state['step'] += 1 state['RMS'] = self._rms(p_data_fp32) group['lr'] = self._get_lr(group, state) beta2t = 1.0 - math.pow(state['step'], group['decay_rate']) update = (grad**2) + group['eps'][0] if factored: exp_avg_sq_row = state['exp_avg_sq_row'] exp_avg_sq_col = state['exp_avg_sq_col'] exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=1.0 - beta2t) exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=1.0 - beta2t) # Approximation of exponential moving average of square of gradient update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) update.mul_(grad) else: exp_avg_sq = state['exp_avg_sq'] exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t) update = exp_avg_sq.rsqrt().mul_(grad) update.div_( (self._rms(update) / group['clip_threshold']).clamp_(min=1.0) ) update.mul_(group['lr']) if use_first_moment: exp_avg = state['exp_avg'] exp_avg.mul_(group['beta1']).add_(update, alpha=1 - group['beta1']) update = exp_avg if group['weight_decay'] != 0: p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr']) p_data_fp32.add_(-update) if p.data.dtype in {torch.float16, torch.bfloat16}: p.data.copy_(p_data_fp32) return loss
10,509
43.159664
105
py
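The distinctive part of Adafactor above is that the second moment of a 2-D parameter is stored only as per-row and per-column moving averages and reconstructed as an outer product. A small sketch of that factored estimate for a 2-D gradient (illustrative names, not the file's API):

import torch

def factored_adafactor_update(grad, row_ema, col_ema, beta2t, eps=1e-30):
    # row_ema: shape [rows], col_ema: shape [cols]; grad: shape [rows, cols]
    update = grad ** 2 + eps
    row_ema.mul_(beta2t).add_(update.mean(dim=-1), alpha=1.0 - beta2t)
    col_ema.mul_(beta2t).add_(update.mean(dim=-2), alpha=1.0 - beta2t)
    # rank-1 reconstruction of 1/sqrt(second moment), as in _approx_sq_grad()
    r_factor = (row_ema / row_ema.mean(dim=-1, keepdim=True)).rsqrt().unsqueeze(-1)
    c_factor = col_ema.rsqrt().unsqueeze(0)
    return grad * r_factor * c_factor   # the unclipped, unscaled parameter update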
RegularizedBN
RegularizedBN-main/fairseq/optim/fused_adam.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import types import torch def get_fused_adam_class(): """ Look for the FusedAdam optimizer from apex. We first try to load the "contrib" interface, which is a bit faster than the main interface, but is technically deprecated. """ try: # The "deprecated" interface in recent versions of apex is a bit # faster than the main interface, since we don't use the apex # optimizer. This can be installed by passing the # `--deprecated_fused_adam` option when building apex. global fused_adam_cuda import importlib fused_adam_cuda = importlib.import_module("fused_adam_cuda") return FusedAdamV1 except ImportError: try: # fallback to the newer interface from apex.optimizers import FusedAdam as _FusedAdam # noqa from apex.multi_tensor_apply import multi_tensor_applier if multi_tensor_applier.available: return FusedAdamV2 except ImportError: pass return None class FusedAdamV1(torch.optim.Optimizer): """ Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via ``python setup.py install --cuda_ext --cpp_ext``. It has been proposed in `Adam: A Method for Stochastic Optimization`_. Compared to the original version in Apex, the fairseq version casts grads and params to FP32 internally to support ``--memory-efficient-fp16``. Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups. lr (float, optional): learning rate. (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square. (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability. (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ (default: False) NOT SUPPORTED in FusedAdam! eps_inside_sqrt (boolean, optional): in the 'update parameters' step, adds eps to the bias-corrected second moment estimate before evaluating square root instead of adding it to the square root of second moment estimate as in the original paper. (default: False) .. _Adam: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. _On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__(self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-8, eps_inside_sqrt=False, weight_decay=0., max_grad_norm=0., amsgrad=False): global fused_adam_cuda import importlib fused_adam_cuda = importlib.import_module("fused_adam_cuda") if amsgrad: raise RuntimeError('FusedAdam does not support the AMSGrad variant.') defaults = { 'lr': lr, 'bias_correction': bias_correction, 'betas': betas, 'eps': eps, 'weight_decay': weight_decay, 'max_grad_norm': max_grad_norm, } super().__init__(params, defaults) self.eps_mode = 0 if eps_inside_sqrt else 1 @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return True @property def supports_step_with_scale(self): return True def step(self, closure=None, grads=None, scale=1., grad_norms=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. grads (list of tensors, optional): weight gradient to use for the optimizer update. 
If gradients have type torch.half, parameters are expected to be in type torch.float. (default: None) output params (list of tensors, optional): A reduced precision copy of the updated weights written out in addition to the regular updated weights. Have to be of same type as gradients. (default: None) scale (float, optional): factor to divide gradient tensor values by before applying to weights. (default: 1) """ loss = None if closure is not None: loss = closure() if grads is None: grads_group = [None] * len(self.param_groups) # backward compatibility # assuming a list/generator of parameter means single group elif isinstance(grads, types.GeneratorType): grads_group = [grads] elif type(grads[0]) != list: grads_group = [grads] else: grads_group = grads if grad_norms is None: grad_norms = [None]*len(self.param_groups) for group, grads_this_group, grad_norm in zip(self.param_groups, grads_group, grad_norms): if grads_this_group is None: grads_this_group = [None]*len(group['params']) # compute combined scale factor for this group combined_scale = scale if group.get('max_grad_norm', 0) > 0: # norm is in fact norm*scale clip = ((grad_norm / scale) + 1e-6) / group['max_grad_norm'] if clip > 1: combined_scale = clip * scale bias_correction = 1 if group.get('bias_correction', 1) else 0 for p, grad in zip(group['params'], grads_this_group): # note: p.grad should not ever be set for correct # operation of mixed precision optimizer that sometimes # sends None gradients if p.grad is None and grad is None: continue if grad is None: grad = p.grad.data if grad.is_sparse: raise RuntimeError( 'FusedAdam does not support sparse gradients, ' 'please consider SparseAdam instead' ) p_data_fp32 = p.data.float() state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p_data_fp32) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p_data_fp32) else: state['exp_avg'] = state['exp_avg'].to(p_data_fp32) state['exp_avg_sq'] = state['exp_avg_sq'].to(p_data_fp32) exp_avg = state['exp_avg'] exp_avg_sq = state['exp_avg_sq'] beta1, beta2 = group['betas'] state['step'] += 1 out_p = p.data with torch.cuda.device(p.device): fused_adam_cuda.adam(p_data_fp32, out_p, exp_avg, exp_avg_sq, grad, group['lr'], beta1, beta2, group['eps'], combined_scale, state['step'], self.eps_mode, bias_correction, group['weight_decay']) return loss try: from apex.optimizers import FusedAdam from apex.multi_tensor_apply import multi_tensor_applier class FusedAdamV2(FusedAdam): """ Compared to the original version in Apex, the fairseq version casts grads and params to FP32 internally to support ``--memory-efficient-fp16``. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if not hasattr(self, 'multi_tensor_adam'): raise Exception('Apex installation is outdated. 
Please install an updated version of apex.') @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return True def step(self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None): """Performs a single optimization step.""" loss = None if closure is not None: loss = closure() for group in self.param_groups: bias_correction = 1 if group['bias_correction'] else 0 beta1, beta2 = group['betas'] # assume same step across group now to simplify things # per parameter step can be easily support by making it tensor, or pass list into kernel if 'step' in group: group['step'] += 1 else: group['step'] = 1 # create lists for multi-tensor apply g_16, p_16, orig_p_16, m_16, v_16 = [], [], [], [], [] g_32, p_32, m_32, v_32 = [], [], [], [] for p in group['params']: if p.grad is None: continue if p.grad.data.is_sparse: raise RuntimeError( 'FusedAdam does not support sparse gradients, ' 'please consider SparseAdam instead' ) state = self.state[p] # State initialization if len(state) == 0: # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p.data, dtype=torch.float) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p.data, dtype=torch.float) else: state['exp_avg'] = state['exp_avg'].to(device=p.data.device, dtype=torch.float) state['exp_avg_sq'] = state['exp_avg_sq'].to(device=p.data.device, dtype=torch.float) if p.dtype == torch.float16: g_16.append(p.grad.data.float()) p_16.append(p.data.float()) orig_p_16.append(p.data) m_16.append(state['exp_avg']) v_16.append(state['exp_avg_sq']) elif p.dtype == torch.float32: g_32.append(p.grad.data) p_32.append(p.data) m_32.append(state['exp_avg']) v_32.append(state['exp_avg_sq']) else: raise RuntimeError('FusedAdam only support fp16 and fp32.') with torch.cuda.device(p.device): if(len(g_16) > 0): multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_16, p_16, m_16, v_16], group['lr'], beta1, beta2, group['eps'], group['step'], self.adam_w_mode, bias_correction, group['weight_decay']) for orig_p, p in zip(orig_p_16, p_16): orig_p.copy_(p.data) if(len(g_32) > 0): multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_32, p_32, m_32, v_32], group['lr'], beta1, beta2, group['eps'], group['step'], self.adam_w_mode, bias_correction, group['weight_decay']) return loss except ImportError: pass
13,372
41.72524
109
py
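One detail worth calling out in FusedAdamV1.step() above: gradient clipping is folded into the scale factor that the CUDA kernel divides by, since the incoming norm was computed on already-scaled gradients. A pure-Python restatement of that computation (a sketch, not the kernel interface):

def combined_scale(scale, grad_norm, max_grad_norm, eps=1e-6):
    # grad_norm is the norm of the *scaled* gradients, so the clip ratio is taken
    # on grad_norm / scale; if it exceeds 1, it is multiplied back into the divisor.
    if max_grad_norm > 0:
        clip = ((grad_norm / scale) + eps) / max_grad_norm
        if clip > 1:
            return clip * scale
    return scale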
RegularizedBN
RegularizedBN-main/fairseq/optim/adagrad.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.optim from . import FairseqOptimizer, register_optimizer @register_optimizer('adagrad') class Adagrad(FairseqOptimizer): def __init__(self, args, params): super().__init__(args) self._optimizer = torch.optim.Adagrad(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { 'lr': self.args.lr[0], 'weight_decay': self.args.weight_decay, } @property def supports_flat_params(self): return True
1,266
29.902439
92
py
RegularizedBN
RegularizedBN-main/fairseq/optim/fairseq_optimizer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq import utils class FairseqOptimizer(object): def __init__(self, args): super().__init__() self.args = args @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" pass @property def optimizer(self): """Return a torch.optim.optimizer.Optimizer instance.""" if not hasattr(self, '_optimizer'): raise NotImplementedError if not isinstance(self._optimizer, torch.optim.Optimizer): raise ValueError('_optimizer must be an instance of torch.optim.Optimizer') return self._optimizer @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ raise NotImplementedError @property def params(self): """Return an iterable of the parameters held by the optimizer.""" for param_group in self.param_groups: for p in param_group['params']: yield p @property def param_groups(self): return self.optimizer.param_groups def __getstate__(self): return self._optimizer.__getstate__() def get_lr(self): """Return the current learning rate.""" return self.param_groups[0]['lr'] def set_lr(self, lr): """Set the learning rate.""" for param_group in self.param_groups: param_group['lr'] = lr def state_dict(self): """Return the optimizer's state dict.""" return self.optimizer.state_dict() def load_state_dict(self, state_dict, optimizer_overrides=None): """Load an optimizer state dict. In general we should prefer the configuration of the existing optimizer instance (e.g., learning rate) over that found in the state_dict. This allows us to resume training from a checkpoint using a new set of optimizer args. """ self.optimizer.load_state_dict(state_dict) if optimizer_overrides is not None and len(optimizer_overrides) > 0: # override learning rate, momentum, etc. with latest values for group in self.param_groups: group.update(optimizer_overrides) def backward(self, loss): """Computes the sum of gradients of the given tensor w.r.t. graph leaves.""" loss.backward() def multiply_grads(self, c): """Multiplies grads by a constant *c*.""" for p in self.params: if p.grad is not None: p.grad.data.mul_(c) def clip_grad_norm(self, max_norm, aggregate_norm_fn=None): """Clips gradient norm.""" return utils.clip_grad_norm_(self.params, max_norm, aggregate_norm_fn) def step(self, closure=None, scale=1.): """Performs a single optimization step.""" if self.supports_step_with_scale: self.optimizer.step(closure, scale=scale) else: self.optimizer.step(closure) def zero_grad(self): """Clears the gradients of all optimized parameters.""" for p in self.params: p.grad = None self.optimizer.zero_grad() @property def supports_memory_efficient_fp16(self): if hasattr(self.optimizer, 'supports_memory_efficient_fp16'): return self.optimizer.supports_memory_efficient_fp16 return False @property def supports_step_with_scale(self): if hasattr(self.optimizer, 'supports_step_with_scale'): return self.optimizer.supports_step_with_scale return False @property def supports_flat_params(self): """ Whether the optimizer supports collapsing of the model parameters/gradients into a single contiguous Tensor. 
""" if hasattr(self.optimizer, 'supports_flat_params'): return self.optimizer.supports_flat_params return False def average_params(self): pass
4,361
31.552239
87
py
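A hypothetical training step showing how the FairseqOptimizer interface above is meant to be driven; the loss function, batch keys, and normalization constant are illustrative, not part of the class:

def train_step(model, batch, optimizer, compute_loss, max_norm=0.0):
    optimizer.zero_grad()
    loss, ntokens = compute_loss(model, batch)       # user-supplied loss (hypothetical)
    optimizer.backward(loss)                         # plain loss.backward() in the base class
    optimizer.multiply_grads(1.0 / max(ntokens, 1))  # e.g. per-token gradient normalization
    grad_norm = optimizer.clip_grad_norm(max_norm)   # returns the norm; clips only if max_norm > 0
    optimizer.step()
    return loss.detach(), grad_norm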
RegularizedBN
RegularizedBN-main/fairseq/optim/adadelta.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.optim from . import FairseqOptimizer, register_optimizer @register_optimizer('adadelta') class Adadelta(FairseqOptimizer): def __init__(self, args, params): super().__init__(args) self._optimizer = torch.optim.Adadelta(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--adadelta-rho', type=float, default=0.9, metavar='RHO', help='coefficient used for computing a running average of squared gradients') parser.add_argument('--adadelta-eps', type=float, default=1e-6, metavar='EPS', help='term added to the denominator to improve numerical stability') parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') parser.add_argument('--anneal-eps', action='store_true', help='flag to anneal eps') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { 'lr': self.args.lr[0], 'rho': self.args.adadelta_rho, 'eps': self.args.adadelta_eps, 'weight_decay': self.args.weight_decay, } @property def supports_flat_params(self): return True
1,823
37
105
py
RegularizedBN
RegularizedBN-main/fairseq/optim/lr_scheduler/inverse_square_root_schedule.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from . import FairseqLRScheduler, register_lr_scheduler @register_lr_scheduler('inverse_sqrt') class InverseSquareRootSchedule(FairseqLRScheduler): """Decay the LR based on the inverse square root of the update number. We also support a warmup phase where we linearly increase the learning rate from some initial learning rate (``--warmup-init-lr``) until the configured learning rate (``--lr``). Thereafter we decay proportional to the number of updates, with a decay factor set to align with the configured learning rate. During warmup:: lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates) lr = lrs[update_num] After warmup:: decay_factor = args.lr * sqrt(args.warmup_updates) lr = decay_factor / sqrt(update_num) """ def __init__(self, args, optimizer): super().__init__(args, optimizer) if len(args.lr) > 1: raise ValueError( 'Cannot use a fixed learning rate schedule with inverse_sqrt.' ' Consider --lr-scheduler=fixed instead.' ) warmup_end_lr = args.lr[0] if args.warmup_init_lr < 0: args.warmup_init_lr = 0 if args.warmup_updates > 0 else warmup_end_lr # linearly warmup for the first args.warmup_updates self.lr_step = (warmup_end_lr - args.warmup_init_lr) / args.warmup_updates # then, decay prop. to the inverse square root of the update number self.decay_factor = warmup_end_lr * args.warmup_updates**0.5 # initial learning rate self.lr = args.warmup_init_lr self.optimizer.set_lr(self.lr) @staticmethod def add_args(parser): """Add arguments to the parser for this LR scheduler.""" # fmt: off parser.add_argument('--warmup-updates', default=4000, type=int, metavar='N', help='warmup the learning rate linearly for the first N updates') parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR', help='initial learning rate during warmup phase; default is args.lr') # fmt: on def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" super().step(epoch, val_loss) # we don't change the learning rate at epoch boundaries return self.optimizer.get_lr() def step_update(self, num_updates): """Update the learning rate after each update.""" if num_updates < self.args.warmup_updates: self.lr = self.args.warmup_init_lr + num_updates*self.lr_step else: self.lr = self.decay_factor * num_updates**-0.5 self.optimizer.set_lr(self.lr) return self.lr
2,952
38.905405
97
py
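A compact restatement of the schedule in step_update() above, with a worked example using the default warmup settings (a sketch for checking values by hand):

def inverse_sqrt_lr(num_updates, lr=5e-4, warmup_updates=4000, warmup_init_lr=1e-7):
    if num_updates < warmup_updates:
        lr_step = (lr - warmup_init_lr) / warmup_updates
        return warmup_init_lr + num_updates * lr_step      # linear warmup
    decay_factor = lr * warmup_updates ** 0.5
    return decay_factor / num_updates ** 0.5               # inverse-sqrt decay

# inverse_sqrt_lr(2000)  ~= 2.5e-4   (halfway through warmup)
# inverse_sqrt_lr(4000)  == 5e-4     (peak)
# inverse_sqrt_lr(16000) == 2.5e-4   (decays with 1/sqrt(num_updates))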
RegularizedBN
RegularizedBN-main/fairseq/optim/lr_scheduler/tri_stage_lr_scheduler.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import FairseqLRScheduler, register_lr_scheduler
import math


@register_lr_scheduler('tri_stage')
class TriStageLRSchedule(FairseqLRScheduler):
    """Tri-stage learning rate scheduler.

    Implements the learning rate scheduler in https://arxiv.org/pdf/1904.08779.pdf

    Similar to the inverse_square_root scheduler, but tri_stage employs
    three stages of LR scheduling:

        - warmup stage, starting from `lr` * `init_lr_scale`, linearly
          increased to `lr` in `warmup_steps` iterations

        - hold stage, after `warmup_steps`, keep the LR as `lr` for `hold_steps`
          iterations

        - decay stage, after hold stage, decay LR exponentially to
          `lr` * `final_lr_scale` in `decay_steps`;
          after that LR is kept at `final_lr_scale` * `lr`

    During warmup::

      init_lr = args.init_lr_scale * args.lr
      lrs = torch.linspace(init_lr, args.lr, args.warmup_steps)
      lr = lrs[update_num]

    During hold::

      lr = args.lr

    During decay::

      decay_factor = - math.log(args.final_lr_scale) / args.decay_steps
      lr = args.lr * exp(- (update_num - warmup_steps - hold_steps) * decay_factor)

    After that::

      lr = args.lr * args.final_lr_scale
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        if len(args.lr) > 1:
            raise ValueError(
                'Cannot use a fixed learning rate schedule with tri-stage lr.'
                ' Consider --lr-scheduler=fixed instead.'
            )

        # calculate LR at each point
        self.peak_lr = args.lr[0]
        self.init_lr = args.init_lr_scale * args.lr[0]
        self.final_lr = args.final_lr_scale * args.lr[0]

        # remember the steps at each stage
        self.warmup_steps = args.warmup_steps
        self.hold_steps = args.hold_steps
        self.decay_steps = args.decay_steps

        self.warmup_rate = (
            (self.peak_lr - self.init_lr) / self.warmup_steps
            if self.warmup_steps != 0 else 0
        )
        self.decay_factor = -math.log(args.final_lr_scale) / args.decay_steps

        # initial learning rate
        self.lr = self.init_lr
        self.optimizer.set_lr(self.lr)

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        # fmt: off
        parser.add_argument('--warmup-steps', default=4000, type=int, metavar='N',
                            help='warmup the learning rate linearly for the first N updates')
        parser.add_argument('--hold-steps', default=20000, type=int, metavar='N',
                            help='steps in hold stage.')
        parser.add_argument('--decay-steps', default=60000, type=int, metavar='N',
                            help='steps in decay stage')
        parser.add_argument('--init-lr-scale', default=0.01, type=float,
                            help='initial learning rate scale during warmup phase; default is 0.01')
        parser.add_argument('--final-lr-scale', default=0.01, type=float,
                            help='final learning rate scale; default to 0.01')
        # fmt: on

    def _decide_stage(self, update_step):
        """
        return stage, and the corresponding steps within the current stage
        """
        if update_step < self.warmup_steps:
            # warmup stage
            return 0, update_step

        offset = self.warmup_steps

        if update_step < offset + self.hold_steps:
            # hold stage
            return 1, update_step - offset

        offset += self.hold_steps

        if update_step <= offset + self.decay_steps:
            # decay stage
            return 2, update_step - offset

        offset += self.decay_steps

        # still here ? constant lr stage
        return 3, update_step - offset

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        # we don't change the learning rate at epoch boundaries
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        stage, steps_in_stage = self._decide_stage(num_updates)
        if stage == 0:
            self.lr = self.init_lr + self.warmup_rate * steps_in_stage
        elif stage == 1:
            self.lr = self.peak_lr
        elif stage == 2:
            self.lr = self.peak_lr * math.exp(-self.decay_factor * steps_in_stage)
        elif stage == 3:
            self.lr = self.final_lr
        else:
            raise ValueError("Undefined stage")

        self.optimizer.set_lr(self.lr)

        return self.lr
5,062
29.871951
87
py
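The three stages described in the docstring above, restated as one self-contained function using the parser defaults (a sketch for checking values, not the scheduler class):

import math

def tri_stage_lr(step, peak_lr=1e-3, init_lr_scale=0.01, final_lr_scale=0.01,
                 warmup_steps=4000, hold_steps=20000, decay_steps=60000):
    init_lr = init_lr_scale * peak_lr
    final_lr = final_lr_scale * peak_lr
    if step < warmup_steps:                       # stage 0: linear warmup
        return init_lr + (peak_lr - init_lr) * step / warmup_steps
    step -= warmup_steps
    if step < hold_steps:                         # stage 1: hold at peak
        return peak_lr
    step -= hold_steps
    if step <= decay_steps:                       # stage 2: exponential decay
        decay_factor = -math.log(final_lr_scale) / decay_steps
        return peak_lr * math.exp(-decay_factor * step)
    return final_lr                               # stage 3: constant floor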
RegularizedBN
RegularizedBN-main/fairseq/optim/lr_scheduler/reduce_lr_on_plateau.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.optim.lr_scheduler from . import FairseqLRScheduler, register_lr_scheduler @register_lr_scheduler('reduce_lr_on_plateau') class ReduceLROnPlateau(FairseqLRScheduler): """ Decay the LR by a factor every time the validation loss plateaus. Also comes with optional warmup phase, where we linearly increase the learning rate from some initial learning rate (``--warmup-init-lr``) until the configured learning rate (``--lr``). Thereafter the lr is adjusted according to original reduce_on_plateau scheme. During warmup:: lrs = torch.linspace( args.warmup_init_lr, args.lr, args.warmup_updates ) lr = lrs[update_num] """ def __init__(self, args, optimizer): super().__init__(args, optimizer) if len(args.lr) > 1: raise ValueError( 'Cannot use a fixed learning rate schedule with reduce_lr_on_plateau.' ' Consider --lr-scheduler=fixed instead.' ) self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( self.optimizer.optimizer, patience=args.lr_patience, factor=args.lr_shrink, mode='max' if args.maximize_best_checkpoint_metric else 'min', threshold=args.lr_threshold) warmup_end_lr = args.lr[0] # if no warm up, sets initial lr to be args.lr[0] if args.warmup_init_lr < 0: args.warmup_init_lr = 0 if args.warmup_updates > 0 else warmup_end_lr # linearly warmup for the first args.warmup_updates if args.warmup_updates > 0: self.lr_step = (warmup_end_lr - args.warmup_init_lr) / args.warmup_updates # this flag is either set from arg when no warm up, or set by # step_update() when warmup finishes self.warmup_end = True if args.warmup_updates <= 0 else False # initial learning rate # this self.lr is used only during init and/or warm up period self.lr = args.warmup_init_lr self.optimizer.set_lr(self.lr) @staticmethod def add_args(parser): """Add arguments to the parser for this LR scheduler.""" # fmt: off parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS', help='shrink factor for annealing, lr_new = (lr * lr_shrink)') parser.add_argument('--lr-threshold', default=1e-4, type=float, metavar='LT', help='threshold for measuring the new optimum, ' 'to only focus on significant changes') parser.add_argument('--lr-patience', default=0, type=int, help='number of epochs with no improvement after which ' 'learning rate will be reduced') parser.add_argument('--warmup-updates', default=0, type=int, metavar='N', help='warmup the learning rate linearly for the first N updates') parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR', help='initial learning rate during warmup phase; default is args.lr') # fmt: on def state_dict(self): """Return the LR scheduler state dict.""" return { 'best': self.lr_scheduler.best, 'last_epoch': self.lr_scheduler.last_epoch, } def load_state_dict(self, state_dict): """Load an LR scheduler state dict.""" self.lr_scheduler.best = state_dict['best'] if 'last_epoch' in state_dict: self.lr_scheduler.last_epoch = state_dict['last_epoch'] def step(self, epoch, val_loss=None): """ Update the learning rate at the end of the given epoch if warmup finishes otherwise no update of lr on epoch boundaries """ if val_loss is not None and self.warmup_end is True: self.lr_scheduler.step(val_loss) else: self.lr_scheduler.last_epoch = epoch return self.optimizer.get_lr() def step_update(self, num_updates): """ Update the learning rate after each update.""" # if there is warmup if 
self.args.warmup_updates > 0: if num_updates <= self.args.warmup_updates: self.lr = self.args.warmup_init_lr + num_updates*self.lr_step self.optimizer.set_lr(self.lr) else: if self.warmup_end is False: self.warmup_end = True # else do nothing return self.optimizer.get_lr()
4,743
40.982301
97
py
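After warmup, the class above delegates entirely to torch's ReduceLROnPlateau at epoch boundaries. A standalone illustration of that underlying behavior, with values chosen only to show the shrink rule:

import torch

param = torch.nn.Parameter(torch.zeros(1))
opt = torch.optim.SGD([param], lr=0.1)
sched = torch.optim.lr_scheduler.ReduceLROnPlateau(
    opt, mode='min', factor=0.1, patience=0, threshold=1e-4)

for epoch, val_loss in enumerate([1.0, 0.5, 0.5, 0.5]):
    sched.step(val_loss)
    print(epoch, opt.param_groups[0]['lr'])
# prints (approximately) 0.1, 0.1, 0.01, 0.001: the LR shrinks by `factor`
# at every epoch whose loss fails to improve by `threshold`.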
RegularizedBN
RegularizedBN-main/fairseq/optim/lr_scheduler/cosine_lr_scheduler.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from . import FairseqLRScheduler, register_lr_scheduler @register_lr_scheduler('cosine') class CosineSchedule(FairseqLRScheduler): """Assign LR based on a cyclical schedule that follows the cosine function. See https://arxiv.org/pdf/1608.03983.pdf for details. We also support a warmup phase where we linearly increase the learning rate from some initial learning rate (``--warmup-init-lr``) until the configured max learning rate (``--max-lr``). During warmup:: lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates) lr = lrs[update_num] After warmup:: lr = lr_min + 0.5*(lr_max - lr_min)*(1 + cos(pi * t_curr / t_i)) where ``t_curr`` is current percentage of updates within the current period range and ``t_i`` is the current period range, which is scaled by ``t_mul`` after every iteration. """ def __init__(self, args, optimizer): super().__init__(args, optimizer) if len(args.lr) > 1: raise ValueError( 'Cannot use a fixed learning rate schedule with cosine.' ' Consider --lr-scheduler=fixed instead.' ) warmup_end_lr = args.max_lr if args.warmup_init_lr < 0: args.warmup_init_lr = args.lr[0] self.min_lr = args.lr[0] self.max_lr = args.max_lr assert self.max_lr > self.min_lr, 'max_lr must be more than lr' self.t_mult = args.t_mult self.period = args.lr_period_updates if self.period <= 0: assert args.max_update >= 0, 'Either --max_update or --lr-period-updates must be set' self.period = args.max_update - args.warmup_updates if args.warmup_updates > 0: # linearly warmup for the first args.warmup_updates self.lr_step = (warmup_end_lr - args.warmup_init_lr) / args.warmup_updates else: self.lr_step = 1 self.warmup_updates = args.warmup_updates self.lr_shrink = args.lr_shrink # initial learning rate self.lr = args.warmup_init_lr self.optimizer.set_lr(self.lr) @staticmethod def add_args(parser): """Add arguments to the parser for this LR scheduler.""" # fmt: off parser.add_argument('--warmup-updates', default=0, type=int, metavar='N', help='warmup the learning rate linearly for the first N updates') parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR', help='initial learning rate during warmup phase; default is args.lr') parser.add_argument('--max-lr', type=float, metavar='LR', help='max learning rate, must be more than args.lr') parser.add_argument('--t-mult', default=1, type=float, metavar='LR', help='factor to grow the length of each period') parser.add_argument('--lr-period-updates', default=-1, type=float, metavar='LR', help='initial number of updates per period') parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS', help='shrink factor for annealing') # fmt: on def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" super().step(epoch, val_loss) # we don't change the learning rate at epoch boundaries return self.optimizer.get_lr() def step_update(self, num_updates): """Update the learning rate after each update.""" if num_updates < self.args.warmup_updates: self.lr = self.args.warmup_init_lr + num_updates * self.lr_step else: curr_updates = num_updates - self.args.warmup_updates if self.t_mult != 1: i = math.floor(math.log(1 - curr_updates / self.period * (1 - self.t_mult), self.t_mult)) t_i = self.t_mult ** i * self.period t_curr = curr_updates - (1 - self.t_mult ** i) / (1 - self.t_mult) * self.period else: i = 
math.floor(curr_updates / self.period) t_i = self.period t_curr = curr_updates - (self.period * i) lr_shrink = self.lr_shrink ** i min_lr = self.min_lr * lr_shrink max_lr = self.max_lr * lr_shrink self.lr = min_lr + 0.5 * (max_lr - min_lr) * (1 + math.cos(math.pi * t_curr / t_i)) self.optimizer.set_lr(self.lr) return self.lr
4,758
38.991597
105
py
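A condensed restatement of step_update() above for the common case t_mult == 1 (fixed period length); the default values here are illustrative, not the file's defaults:

import math

def cosine_lr(num_updates, min_lr=1e-5, max_lr=1e-3, period=50000,
              warmup_updates=0, warmup_init_lr=1e-5, lr_shrink=0.1):
    if num_updates < warmup_updates:
        lr_step = (max_lr - warmup_init_lr) / warmup_updates
        return warmup_init_lr + num_updates * lr_step
    curr = num_updates - warmup_updates
    i = curr // period                    # number of completed periods
    t_curr = curr - i * period
    shrink = lr_shrink ** i               # each restart shrinks both ends of the range
    lo, hi = min_lr * shrink, max_lr * shrink
    return lo + 0.5 * (hi - lo) * (1 + math.cos(math.pi * t_curr / period))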
RegularizedBN
RegularizedBN-main/fairseq/scoring/bleu.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import ctypes import math import sys import torch from fairseq.scoring import register_scoring class BleuStat(ctypes.Structure): _fields_ = [ ("reflen", ctypes.c_size_t), ("predlen", ctypes.c_size_t), ("match1", ctypes.c_size_t), ("count1", ctypes.c_size_t), ("match2", ctypes.c_size_t), ("count2", ctypes.c_size_t), ("match3", ctypes.c_size_t), ("count3", ctypes.c_size_t), ("match4", ctypes.c_size_t), ("count4", ctypes.c_size_t), ] @register_scoring("sacrebleu") class SacrebleuScorer(object): def __init__(self, *unused): import sacrebleu self.sacrebleu = sacrebleu self.reset() def reset(self, one_init=False): if one_init: raise NotImplementedError self.ref = [] self.sys = [] def add_string(self, ref, pred): self.ref.append(ref) self.sys.append(pred) def score(self, order=4): return self.result_string(order).score def result_string(self, order=4): if order != 4: raise NotImplementedError return self.sacrebleu.corpus_bleu(self.sys, [self.ref]).format() @register_scoring("bleu") class Scorer(object): def __init__(self, pad, eos, unk): self.stat = BleuStat() self.pad = pad self.eos = eos self.unk = unk try: from fairseq import libbleu except ImportError as e: sys.stderr.write("ERROR: missing libbleu.so. run `pip install --editable .`\n") raise e self.C = ctypes.cdll.LoadLibrary(libbleu.__file__) self.reset() def reset(self, one_init=False): if one_init: self.C.bleu_one_init(ctypes.byref(self.stat)) else: self.C.bleu_zero_init(ctypes.byref(self.stat)) def add(self, ref, pred): if not isinstance(ref, torch.IntTensor): raise TypeError("ref must be a torch.IntTensor (got {})".format(type(ref))) if not isinstance(pred, torch.IntTensor): raise TypeError("pred must be a torch.IntTensor(got {})".format(type(pred))) # don't match unknown words rref = ref.clone() assert not rref.lt(0).any() rref[rref.eq(self.unk)] = -999 rref = rref.contiguous().view(-1) pred = pred.contiguous().view(-1) self.C.bleu_add( ctypes.byref(self.stat), ctypes.c_size_t(rref.size(0)), ctypes.c_void_p(rref.data_ptr()), ctypes.c_size_t(pred.size(0)), ctypes.c_void_p(pred.data_ptr()), ctypes.c_int(self.pad), ctypes.c_int(self.eos), ) def score(self, order=4): psum = sum( math.log(p) if p > 0 else float("-Inf") for p in self.precision()[:order] ) return self.brevity() * math.exp(psum / order) * 100 def precision(self): def ratio(a, b): return a / b if b > 0 else 0 return [ ratio(self.stat.match1, self.stat.count1), ratio(self.stat.match2, self.stat.count2), ratio(self.stat.match3, self.stat.count3), ratio(self.stat.match4, self.stat.count4), ] def brevity(self): r = self.stat.reflen / self.stat.predlen return min(1, math.exp(1 - r)) def result_string(self, order=4): assert order <= 4, "BLEU scores for order > 4 aren't supported" fmt = "BLEU{} = {:2.2f}, {:2.1f}" for _ in range(1, order): fmt += "/{:2.1f}" fmt += " (BP={:.3f}, ratio={:.3f}, syslen={}, reflen={})" bleup = [p * 100 for p in self.precision()[:order]] return fmt.format( order, self.score(order=order), *bleup, self.brevity(), self.stat.predlen / self.stat.reflen, self.stat.predlen, self.stat.reflen )
4,141
28.169014
91
py
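The corpus score in Scorer.score() above is the brevity penalty times the geometric mean of the 1- to 4-gram precisions, scaled to 0-100. A small standalone restatement with a hand-checkable example (the counts below are made up):

import math

def bleu_from_stats(matches, counts, reflen, predlen, order=4):
    log_precisions = []
    for m, c in zip(matches[:order], counts[:order]):
        p = m / c if c > 0 else 0.0
        log_precisions.append(math.log(p) if p > 0 else float('-inf'))
    brevity = min(1.0, math.exp(1 - reflen / predlen))   # penalize short hypotheses
    return brevity * math.exp(sum(log_precisions) / order) * 100

# bleu_from_stats([9, 5, 3, 2], [10, 9, 8, 7], reflen=10, predlen=10) ~= 48.1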
RegularizedBN
RegularizedBN-main/fairseq/benchmark/dummy_model.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn import torch.nn.functional as F from fairseq.data import Dictionary from fairseq.models import ( FairseqDecoder, FairseqLanguageModel, register_model, register_model_architecture, ) @register_model('dummy_model') class DummyModel(FairseqLanguageModel): def __init__(self, args, encoder): super().__init__(encoder) self.args = args @staticmethod def add_args(parser): parser.add_argument('--num-layers', type=int, default=24) parser.add_argument('--embed-dim', type=int, default=1024) @classmethod def build_model(cls, args, task): encoder = DummyEncoder( num_embed=len(task.target_dictionary), embed_dim=args.embed_dim, num_layers=args.num_layers, ) return cls(args, encoder) def forward(self, src_tokens, masked_tokens=None, **kwargs): return self.decoder(src_tokens, masked_tokens=masked_tokens) class DummyEncoder(FairseqDecoder): def __init__(self, num_embed=50000, embed_dim=1024, num_layers=24): super().__init__(Dictionary()) self.embed = nn.Embedding( num_embeddings=num_embed, embedding_dim=embed_dim, padding_idx=0 ) self.layers_a = nn.ModuleList([ nn.Sequential( nn.LayerNorm(embed_dim), nn.Linear(embed_dim, 3*embed_dim), # q, k, v input projection nn.Linear(3*embed_dim, embed_dim), # skip self-attention nn.Linear(embed_dim, embed_dim), # output projection nn.Dropout(), ) for i in range(num_layers) ]) self.layers_b = nn.ModuleList([ nn.Sequential( nn.LayerNorm(embed_dim), nn.Linear(embed_dim, 4*embed_dim), # FFN nn.ReLU(), nn.Linear(4*embed_dim, embed_dim), # FFN nn.Dropout(0.1), ) for i in range(num_layers) ]) self.out_proj = nn.Linear(embed_dim, num_embed) def forward(self, tokens, masked_tokens=None): x = self.embed(tokens) for layer_a, layer_b in zip(self.layers_a, self.layers_b): x = x + layer_a(x) x = x + layer_b(x) x = self.out_proj(x) if masked_tokens is not None: x = x[masked_tokens] return (x,) def max_positions(self): return 1024 def get_normalized_probs(self, net_output, log_probs, sample=None): logits = net_output[0].float() if log_probs: return F.log_softmax(logits, dim=-1) else: return F.softmax(logits, dim=-1) @register_model_architecture('dummy_model', 'dummy_model') def base_architecture(args): pass
2,971
29.958333
78
py
RegularizedBN
RegularizedBN-main/fairseq/benchmark/dummy_mt.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import numpy as np import torch from fairseq.data import Dictionary, FairseqDataset from fairseq.tasks import FairseqTask, register_task logger = logging.getLogger(__name__) @register_task('dummy_mt') class DummyMTTask(FairseqTask): @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" parser.add_argument('--dict-size', default=49996, type=int) parser.add_argument('--dataset-size', default=100000, type=int) parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments ' 'per sample for BERT dataset') def __init__(self, args, dictionary): super().__init__(args) self.dictionary = dictionary self.seed = args.seed dictionary.pad_to_multiple_(8) # often faster if divisible by 8 seq = torch.arange(args.tokens_per_sample + 1) + dictionary.pad() + 1 self.dummy_src = seq[:-1] self.dummy_tgt = seq[1:] @classmethod def setup_task(cls, args, **kwargs): """Setup the task. """ dictionary = Dictionary() for i in range(args.dict_size): dictionary.add_symbol('word{}'.format(i)) logger.info('dictionary: {} types'.format(len(dictionary))) return cls(args, dictionary) def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ if self.args.max_sentences is not None: bsz = self.args.max_sentences else: bsz = max(1, self.args.max_tokens // self.args.tokens_per_sample) tgt = torch.stack([self.dummy_tgt for _ in range(bsz)]) self.datasets[split] = DummyDataset( { 'id': 1, 'net_input': { 'src_tokens': torch.stack([self.dummy_src for _ in range(bsz)]), 'src_lengths': torch.full( (bsz, ), self.args.tokens_per_sample, dtype=torch.long ), 'prev_output_tokens': tgt.clone(), }, 'target': tgt, 'nsentences': bsz, 'ntokens': bsz * self.args.tokens_per_sample, }, num_items=self.args.dataset_size, item_size=self.args.tokens_per_sample, ) @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary class DummyDataset(FairseqDataset): def __init__(self, batch, num_items, item_size): super().__init__() self.batch = batch self.num_items = num_items self.item_size = item_size def __getitem__(self, index): return index def __len__(self): return self.num_items def collater(self, samples): return self.batch @property def sizes(self): return np.array([self.item_size] * self.num_items) def num_tokens(self, index): return self.item_size def size(self, index): return self.item_size def ordered_indices(self): return np.arange(self.num_items) @property def supports_prefetch(self): return False
3,605
28.801653
84
py
RegularizedBN
RegularizedBN-main/fairseq/benchmark/dummy_masked_lm.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import numpy as np import torch from fairseq.data import Dictionary, FairseqDataset from fairseq.tasks import FairseqTask, register_task logger = logging.getLogger(__name__) @register_task('dummy_masked_lm') class DummyMaskedLMTask(FairseqTask): @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" parser.add_argument('--dict-size', default=49995, type=int) parser.add_argument('--dataset-size', default=100000, type=int) parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments ' 'per sample for BERT dataset') def __init__(self, args, dictionary): super().__init__(args) self.dictionary = dictionary self.seed = args.seed # add mask token self.mask_idx = dictionary.add_symbol('<mask>') dictionary.pad_to_multiple_(8) # often faster if divisible by 8 mask_idx = 0 pad_idx = 1 seq = torch.arange(args.tokens_per_sample) + pad_idx + 1 mask = torch.arange(2, args.tokens_per_sample, 7) # ~15% src = seq.clone() src[mask] = mask_idx tgt = torch.full_like(seq, pad_idx) tgt[mask] = seq[mask] self.dummy_src = src self.dummy_tgt = tgt @classmethod def setup_task(cls, args, **kwargs): """Setup the task. """ dictionary = Dictionary() for i in range(args.dict_size): dictionary.add_symbol('word{}'.format(i)) logger.info('dictionary: {} types'.format(len(dictionary))) return cls(args, dictionary) def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ if self.args.max_sentences is not None: bsz = self.args.max_sentences else: bsz = max(1, self.args.max_tokens // self.args.tokens_per_sample) self.datasets[split] = DummyDataset( { 'id': 1, 'net_input': { 'src_tokens': torch.stack([self.dummy_src for _ in range(bsz)]), 'src_lengths': torch.full( (bsz, ), self.args.tokens_per_sample, dtype=torch.long ), }, 'target': torch.stack([self.dummy_tgt for _ in range(bsz)]), 'nsentences': bsz, 'ntokens': bsz * self.args.tokens_per_sample, }, num_items=self.args.dataset_size, item_size=self.args.tokens_per_sample, ) @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary class DummyDataset(FairseqDataset): def __init__(self, batch, num_items, item_size): super().__init__() self.batch = batch self.num_items = num_items self.item_size = item_size def __getitem__(self, index): return index def __len__(self): return self.num_items def collater(self, samples): return self.batch @property def sizes(self): return np.array([self.item_size] * self.num_items) def num_tokens(self, index): return self.item_size def size(self, index): return self.item_size def ordered_indices(self): return np.arange(self.num_items) @property def supports_prefetch(self): return False
3,840
29.007813
84
py
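A standalone sketch (not from the repo) of the masking pattern that DummyMaskedLMTask above hard-codes: every 7th position starting at index 2 is masked, roughly 15% of the sequence, and targets are pad everywhere except at the masked positions. mask_idx=0 and pad_idx=1 mirror the constants used above; tokens_per_sample=512 is an assumption.

import torch

tokens_per_sample = 512  # assumed --tokens-per-sample
mask_idx, pad_idx = 0, 1

seq = torch.arange(tokens_per_sample) + pad_idx + 1
mask = torch.arange(2, tokens_per_sample, 7)   # every 7th position, ~15%

src = seq.clone()
src[mask] = mask_idx                  # masked-out inputs
tgt = torch.full_like(seq, pad_idx)   # loss is ignored on pad ...
tgt[mask] = seq[mask]                 # ... and computed only at masked slots

print(mask.numel() / tokens_per_sample)  # ~0.146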
RegularizedBN
RegularizedBN-main/fairseq/benchmark/dummy_lm.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import numpy as np import torch from fairseq.data import Dictionary, FairseqDataset from fairseq.tasks import FairseqTask, register_task logger = logging.getLogger(__name__) @register_task('dummy_lm') class DummyLMTask(FairseqTask): @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" parser.add_argument('--dict-size', default=49996, type=int) parser.add_argument('--dataset-size', default=100000, type=int) parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments ' 'per sample for BERT dataset') def __init__(self, args, dictionary): super().__init__(args) self.dictionary = dictionary self.seed = args.seed dictionary.pad_to_multiple_(8) # often faster if divisible by 8 seq = torch.arange(args.tokens_per_sample + 1) + dictionary.pad() + 1 self.dummy_src = seq[:-1] self.dummy_tgt = seq[1:] @classmethod def setup_task(cls, args, **kwargs): """Setup the task. """ dictionary = Dictionary() for i in range(args.dict_size): dictionary.add_symbol('word{}'.format(i)) logger.info('dictionary: {} types'.format(len(dictionary))) return cls(args, dictionary) def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ if self.args.max_sentences is not None: bsz = self.args.max_sentences else: bsz = max(1, self.args.max_tokens // self.args.tokens_per_sample) self.datasets[split] = DummyDataset( { 'id': 1, 'net_input': { 'src_tokens': torch.stack([self.dummy_src for _ in range(bsz)]), 'src_lengths': torch.full( (bsz, ), self.args.tokens_per_sample, dtype=torch.long ), }, 'target': torch.stack([self.dummy_tgt for _ in range(bsz)]), 'nsentences': bsz, 'ntokens': bsz * self.args.tokens_per_sample, }, num_items=self.args.dataset_size, item_size=self.args.tokens_per_sample, ) @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary class DummyDataset(FairseqDataset): def __init__(self, batch, num_items, item_size): super().__init__() self.batch = batch self.num_items = num_items self.item_size = item_size def __getitem__(self, index): return index def __len__(self): return self.num_items def collater(self, samples): return self.batch @property def sizes(self): return np.array([self.item_size] * self.num_items) def num_tokens(self, index): return self.item_size def size(self, index): return self.item_size def ordered_indices(self): return np.arange(self.num_items) @property def supports_prefetch(self): return False
3,532
28.689076
84
py
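A small sanity-check sketch (not from the repo) for the dummy language-modelling pair built by DummyLMTask above: source and target are the same arange shifted by one position, i.e. plain next-token prediction. Pad index 1 stands in for dictionary.pad().

import torch

tokens_per_sample = 512  # assumed --tokens-per-sample
pad = 1                  # assumed dictionary.pad()

seq = torch.arange(tokens_per_sample + 1) + pad + 1
dummy_src, dummy_tgt = seq[:-1], seq[1:]

# target at position i is the source token at position i + 1
assert torch.equal(dummy_tgt[:-1], dummy_src[1:])
assert torch.equal(dummy_tgt, dummy_src + 1)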
RegularizedBN
RegularizedBN-main/fairseq/data/language_pair_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging

import numpy as np
import torch

from fairseq.data import data_utils, FairseqDataset


logger = logging.getLogger(__name__)


def collate(
    samples, pad_idx, eos_idx, left_pad_source=True, left_pad_target=False,
    input_feeding=True, pad_to_length=None,
):
    if len(samples) == 0:
        return {}

    def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None):
        return data_utils.collate_tokens(
            [s[key] for s in samples],
            pad_idx, eos_idx, left_pad, move_eos_to_beginning,
            pad_to_length=pad_to_length,
        )

    def check_alignment(alignment, src_len, tgt_len): #not executed
        if alignment is None or len(alignment) == 0:
            return False
        if alignment[:, 0].max().item() >= src_len - 1 or alignment[:, 1].max().item() >= tgt_len - 1:
            logger.warning("alignment size mismatch found, skipping alignment!")
            return False
        return True

    def compute_alignment_weights(alignments): #not executed
        """
        Given a tensor of shape [:, 2] containing the source-target indices
        corresponding to the alignments, a weight vector containing the
        inverse frequency of each target index is computed.
        For e.g. if alignments = [[5, 7], [2, 3], [1, 3], [4, 2]], then
        a tensor containing [1., 0.5, 0.5, 1] should be returned (since target
        index 3 is repeated twice)
        """
        align_tgt = alignments[:, 1]
        _, align_tgt_i, align_tgt_c = torch.unique(align_tgt, return_inverse=True, return_counts=True)
        align_weights = align_tgt_c[align_tgt_i[np.arange(len(align_tgt))]]
        return 1. / align_weights.float()

    id = torch.LongTensor([s['id'] for s in samples])
    src_tokens = merge(
        'source', left_pad=left_pad_source,
        pad_to_length=pad_to_length['source'] if pad_to_length is not None else None
    )
    # sort by descending source length
    src_lengths = torch.LongTensor([
        s['source'].ne(pad_idx).long().sum() for s in samples
    ])
    src_lengths, sort_order = src_lengths.sort(descending=True)
    id = id.index_select(0, sort_order)
    src_tokens = src_tokens.index_select(0, sort_order)

    prev_output_tokens = None
    target = None
    if samples[0].get('target', None) is not None: #true
        target = merge(
            'target', left_pad=left_pad_target,
            pad_to_length=pad_to_length['target'] if pad_to_length is not None else None,
        )
        target = target.index_select(0, sort_order)
        tgt_lengths = torch.LongTensor([
            s['target'].ne(pad_idx).long().sum() for s in samples
        ]).index_select(0, sort_order)
        ntokens = tgt_lengths.sum().item()

        if samples[0].get('prev_output_tokens', None) is not None: #false
            prev_output_tokens = merge('prev_output_tokens', left_pad=left_pad_target)
        elif input_feeding: #true
            # we create a shifted version of targets for feeding the
            # previous output token(s) into the next decoder step
            prev_output_tokens = merge(
                'target',
                left_pad=left_pad_target,
                move_eos_to_beginning=True,
                pad_to_length=pad_to_length['target'] if pad_to_length is not None else None,
            )
    else:
        ntokens = src_lengths.sum().item()

    batch = {
        'id': id,
        'nsentences': len(samples),
        'ntokens': ntokens,
        'net_input': {
            'src_tokens': src_tokens,
            'src_lengths': src_lengths,
        },
        'target': target,
    }
    if prev_output_tokens is not None: #true
        batch['net_input']['prev_output_tokens'] = prev_output_tokens.index_select(0, sort_order)

    if samples[0].get('alignment', None) is not None: #false
        bsz, tgt_sz = batch['target'].shape
        src_sz = batch['net_input']['src_tokens'].shape[1]

        offsets = torch.zeros((len(sort_order), 2), dtype=torch.long)
        offsets[:, 1] += (torch.arange(len(sort_order), dtype=torch.long)
* tgt_sz) if left_pad_source: offsets[:, 0] += (src_sz - src_lengths) if left_pad_target: offsets[:, 1] += (tgt_sz - tgt_lengths) alignments = [ alignment + offset for align_idx, offset, src_len, tgt_len in zip(sort_order, offsets, src_lengths, tgt_lengths) for alignment in [samples[align_idx]['alignment'].view(-1, 2)] if check_alignment(alignment, src_len, tgt_len) ] if len(alignments) > 0: alignments = torch.cat(alignments, dim=0) align_weights = compute_alignment_weights(alignments) batch['alignments'] = alignments batch['align_weights'] = align_weights if samples[0].get("constraints", None) is not None: #false # Collate the packed constraints across the samples, padding to # the length of the longest sample. lens = [sample.get("constraints").size(0) for sample in samples] max_len = max(lens) constraints = torch.zeros((len(samples), max(lens))).long() for i, sample in enumerate(samples): constraints[i, 0:lens[i]] = samples[i].get("constraints") batch["constraints"] = constraints return batch class LanguagePairDataset(FairseqDataset): """ A pair of torch.utils.data.Datasets. Args: src (torch.utils.data.Dataset): source dataset to wrap src_sizes (List[int]): source sentence lengths src_dict (~fairseq.data.Dictionary): source vocabulary tgt (torch.utils.data.Dataset, optional): target dataset to wrap tgt_sizes (List[int], optional): target sentence lengths tgt_dict (~fairseq.data.Dictionary, optional): target vocabulary left_pad_source (bool, optional): pad source tensors on the left side (default: True). left_pad_target (bool, optional): pad target tensors on the left side (default: False). shuffle (bool, optional): shuffle dataset elements before batching (default: True). input_feeding (bool, optional): create a shifted version of the targets to be passed into the model for teacher forcing (default: True). remove_eos_from_source (bool, optional): if set, removes eos from end of source if it's present (default: False). append_eos_to_target (bool, optional): if set, appends eos to end of target if it's absent (default: False). align_dataset (torch.utils.data.Dataset, optional): dataset containing alignments. constraints (Tensor, optional): 2d tensor with a concatenated, zero- delimited list of constraints for each sentence. append_bos (bool, optional): if set, appends bos to the beginning of source/target sentence. num_buckets (int, optional): if set to a value greater than 0, then batches will be bucketed into the given number of batch shapes. src_lang_id (int, optional): source language ID, if set, the collated batch will contain a field 'src_lang_id' in 'net_input' which indicates the source language of the samples. tgt_lang_id (int, optional): target language ID, if set, the collated batch will contain a field 'tgt_lang_id' which indicates the target language of the samples. 
""" def __init__( self, src, src_sizes, src_dict, tgt=None, tgt_sizes=None, tgt_dict=None, left_pad_source=True, left_pad_target=False, shuffle=True, input_feeding=True, remove_eos_from_source=False, append_eos_to_target=False, align_dataset=None, constraints=None, append_bos=False, eos=None, num_buckets=0, src_lang_id=None, tgt_lang_id=None, sort_noise=0, ): if tgt_dict is not None: assert src_dict.pad() == tgt_dict.pad() assert src_dict.eos() == tgt_dict.eos() assert src_dict.unk() == tgt_dict.unk() if tgt is not None: assert len(src) == len(tgt), "Source and target must contain the same number of examples" self.src = src self.tgt = tgt self.src_sizes = np.array(src_sizes) self.tgt_sizes = np.array(tgt_sizes) if tgt_sizes is not None else None self.src_dict = src_dict self.tgt_dict = tgt_dict self.left_pad_source = left_pad_source self.left_pad_target = left_pad_target self.shuffle = shuffle self.input_feeding = input_feeding self.remove_eos_from_source = remove_eos_from_source self.append_eos_to_target = append_eos_to_target self.align_dataset = align_dataset if self.align_dataset is not None: assert self.tgt_sizes is not None, "Both source and target needed when alignments are provided" self.constraints = constraints self.append_bos = append_bos self.eos = (eos if eos is not None else src_dict.eos()) #self.bos = None if bos<=0 else src_dict.bos() #my code bos=None即没有bos self.src_lang_id = src_lang_id self.tgt_lang_id = tgt_lang_id self.sort_noise = sort_noise if num_buckets > 0: #0 from fairseq.data import BucketPadLengthDataset self.src = BucketPadLengthDataset( self.src, sizes=self.src_sizes, num_buckets=num_buckets, pad_idx=self.src_dict.pad(), left_pad=self.left_pad_source, ) self.src_sizes = self.src.sizes logger.info('bucketing source lengths: {}'.format(list(self.src.buckets))) if self.tgt is not None: self.tgt = BucketPadLengthDataset( self.tgt, sizes=self.tgt_sizes, num_buckets=num_buckets, pad_idx=self.tgt_dict.pad(), left_pad=self.left_pad_target, ) self.tgt_sizes = self.tgt.sizes logger.info('bucketing target lengths: {}'.format(list(self.tgt.buckets))) # determine bucket sizes using self.num_tokens, which will return # the padded lengths (thanks to BucketPadLengthDataset) num_tokens = np.vectorize(self.num_tokens, otypes=[np.long]) self.bucketed_num_tokens = num_tokens(np.arange(len(self.src))) self.buckets = [ (None, num_tokens) for num_tokens in np.unique(self.bucketed_num_tokens) ] else: self.buckets = None def get_batch_shapes(self): return self.buckets def __getitem__(self, index): tgt_item = self.tgt[index] if self.tgt is not None else None src_item = self.src[index] # Append EOS to end of tgt sentence if it does not have an EOS and remove # EOS from end of src sentence if it exists. 
This is useful when we use # use existing datasets for opposite directions i.e., when we want to # use tgt_dataset as src_dataset and vice versa if self.append_eos_to_target: #false eos = self.tgt_dict.eos() if self.tgt_dict else self.src_dict.eos() if self.tgt and self.tgt[index][-1] != eos: tgt_item = torch.cat([self.tgt[index], torch.LongTensor([eos])]) if self.append_bos:#false bos = self.tgt_dict.bos() if self.tgt_dict else self.src_dict.bos() if self.tgt and self.tgt[index][0] != bos: tgt_item = torch.cat([torch.LongTensor([bos]), self.tgt[index]]) bos = self.src_dict.bos() if self.src[index][0] != bos: src_item = torch.cat([torch.LongTensor([bos]), self.src[index]]) if self.remove_eos_from_source: #false eos = self.src_dict.eos() if self.src[index][-1] == eos: src_item = self.src[index][:-1] example = { 'id': index, 'source': src_item, 'target': tgt_item, } if self.align_dataset is not None: #None example['alignment'] = self.align_dataset[index] if self.constraints is not None: #None example["constraints"] = self.constraints[index] return example def __len__(self): return len(self.src) def collater(self, samples, pad_to_length=None): """Merge a list of samples to form a mini-batch. Args: samples (List[dict]): samples to collate pad_to_length (dict, optional): a dictionary of {'source': source_pad_to_length, 'target': target_pad_to_length} to indicate the max length to pad to in source and target respectively. Returns: dict: a mini-batch with the following keys: - `id` (LongTensor): example IDs in the original input order - `ntokens` (int): total number of tokens in the batch - `net_input` (dict): the input to the Model, containing keys: - `src_tokens` (LongTensor): a padded 2D Tensor of tokens in the source sentence of shape `(bsz, src_len)`. Padding will appear on the left if *left_pad_source* is ``True``. - `src_lengths` (LongTensor): 1D Tensor of the unpadded lengths of each source sentence of shape `(bsz)` - `prev_output_tokens` (LongTensor): a padded 2D Tensor of tokens in the target sentence, shifted right by one position for teacher forcing, of shape `(bsz, tgt_len)`. This key will not be present if *input_feeding* is ``False``. Padding will appear on the left if *left_pad_target* is ``True``. - `src_lang_id` (LongTensor): a long Tensor which contains source language IDs of each sample in the batch - `target` (LongTensor): a padded 2D Tensor of tokens in the target sentence of shape `(bsz, tgt_len)`. Padding will appear on the left if *left_pad_target* is ``True``. - `tgt_lang_id` (LongTensor): a long Tensor which contains target language IDs of each sample in the batch """ res = collate( samples, pad_idx=self.src_dict.pad(), eos_idx=self.eos, left_pad_source=self.left_pad_source, left_pad_target=self.left_pad_target, input_feeding=self.input_feeding, pad_to_length=pad_to_length, ) if self.src_lang_id is not None or self.tgt_lang_id is not None: src_tokens = res['net_input']['src_tokens'] bsz = src_tokens.size(0) if self.src_lang_id is not None: res['net_input']['src_lang_id'] = torch.LongTensor( [[self.src_lang_id]] ).expand(bsz, 1).to(src_tokens) if self.tgt_lang_id is not None: res['tgt_lang_id'] = torch.LongTensor( [[self.tgt_lang_id]] ).expand(bsz, 1).to(src_tokens) return res def num_tokens(self, index): """Return the number of tokens in a sample. 
This value is used to enforce ``--max-tokens`` during batching.""" return max(self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0) def src_num_tokens(self,index): return self.src_sizes[index] def tgt_num_tokens(self,index): return self.tgt_sizes[index] if self.tgt_sizes is not None else 0 def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``.""" return (self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0) def ordered_indices(self): """Return an ordered list of indices. Batches will be constructed based on this order.""" if self.shuffle: indices = np.random.permutation(len(self)).astype(np.int64) else: indices = np.arange(len(self), dtype=np.int64) if self.buckets is None: #true # sort by target length, then source length if self.tgt_sizes is not None: #true #print(self.tgt_sizes) sizes = [size+np.random.randint(self.sort_noise+1) for size in self.tgt_sizes] sizes = np.array(sizes) #print(sizes);exit() indices = indices[ np.argsort(sizes[indices], kind='mergesort') ] #indices = indices[ # np.argsort(self.tgt_sizes[indices], kind='mergesort') #] sizes = [size+np.random.randint(self.sort_noise+1) for size in self.src_sizes] sizes = np.array(sizes) indices = indices[ np.argsort(sizes[indices], kind='mergesort') ] #return indices[np.argsort(self.src_sizes[indices], kind='mergesort')] return indices else: # sort by bucketed_num_tokens, which is: # max(padded_src_len, padded_tgt_len) return indices[ np.argsort(self.bucketed_num_tokens[indices], kind='mergesort') ] @property def supports_prefetch(self): return ( getattr(self.src, 'supports_prefetch', False) and (getattr(self.tgt, 'supports_prefetch', False) or self.tgt is None) ) def prefetch(self, indices): self.src.prefetch(indices) if self.tgt is not None: self.tgt.prefetch(indices) if self.align_dataset is not None: self.align_dataset.prefetch(indices) def filter_indices_by_size(self, indices, max_sizes): """ Filter a list of sample indices. Remove those that are longer than specified in max_sizes. Args: indices (np.array): original array of sample indices max_sizes (int or list[int] or tuple[int]): max sample size, can be defined separately for src and tgt (then list or tuple) Returns: np.array: filtered sample array list: list of removed indices """ if max_sizes is None: return indices, [] if type(max_sizes) in (int, float): max_src_size, max_tgt_size = max_sizes, max_sizes else: max_src_size, max_tgt_size = max_sizes if self.tgt_sizes is None: ignored = indices[self.src_sizes[indices] > max_src_size] else: ignored = indices[(self.src_sizes[indices] > max_src_size) | (self.tgt_sizes[indices] > max_tgt_size)] if len(ignored) > 0: if self.tgt_sizes is None: indices = indices[self.src_sizes[indices] <= max_src_size] else: indices = indices[(self.src_sizes[indices] <= max_src_size) & (self.tgt_sizes[indices] <= max_tgt_size)] return indices, ignored.tolist()
19,691
41.715835
107
py
RegularizedBN
RegularizedBN-main/fairseq/data/token_block_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np
import torch

from fairseq.data import FairseqDataset, plasma_utils


class TokenBlockDataset(FairseqDataset):
    """Break a Dataset of tokens into blocks.

    Args:
        dataset (~torch.utils.data.Dataset): dataset to break into blocks
        sizes (List[int]): sentence lengths (required for 'complete' and 'eos')
        block_size (int): maximum block size (ignored in 'eos' break mode)
        break_mode (str, optional): Mode used for breaking tokens. Values can
            be one of:
            - 'none': break tokens into equally sized blocks (up to block_size)
            - 'complete': break tokens into blocks (up to block_size) such that
                blocks contain complete sentences, although block_size may be
                exceeded if some sentences exceed block_size
            - 'complete_doc': similar to 'complete' mode, but do not
                cross document boundaries
            - 'eos': each block contains one sentence (block_size is ignored)
        include_targets (bool, optional): return next tokens as targets
            (default: False).
        document_sep_len (int, optional): document separator size (required for
            'complete_doc' break mode). Typically 1 if the sentences have eos
            and 0 otherwise.
    """

    def __init__(
        self,
        dataset,
        sizes,
        block_size,
        pad,
        eos,
        break_mode=None,
        include_targets=False,
        document_sep_len=1,
    ):
        try:
            from fairseq.data.token_block_utils_fast import (
                _get_slice_indices_fast,
                _get_block_to_dataset_index_fast,
            )
        except ImportError:
            raise ImportError(
                'Please build Cython components with: `pip install --editable .` '
                'or `python setup.py build_ext --inplace`'
            )

        super().__init__()
        self.dataset = dataset
        self.pad = pad
        self.eos = eos
        self.include_targets = include_targets

        assert len(dataset) == len(sizes)
        assert len(dataset) > 0

        if isinstance(sizes, list):
            sizes = np.array(sizes, dtype=np.int64)
        else:
            if torch.is_tensor(sizes):
                sizes = sizes.numpy()
            sizes = sizes.astype(np.int64)

        break_mode = break_mode if break_mode is not None else 'none'

        # For "eos" break-mode, block_size is not a required parameter.
if break_mode == "eos" and block_size is None: block_size = 0 slice_indices = _get_slice_indices_fast(sizes, break_mode, block_size, document_sep_len) self._sizes = slice_indices[:, 1] - slice_indices[:, 0] # build index mapping block indices to the underlying dataset indices if break_mode == "eos": # much faster version for eos break mode block_to_dataset_index = np.stack( [ np.arange(len(sizes)), # starting index in dataset np.zeros( len(sizes), dtype=np.long ), # starting offset within starting index np.arange(len(sizes)), # ending index in dataset ], 1, ) else: block_to_dataset_index = _get_block_to_dataset_index_fast( sizes, slice_indices, ) self._slice_indices = plasma_utils.PlasmaArray(slice_indices) self._sizes = plasma_utils.PlasmaArray(self._sizes) self._block_to_dataset_index = plasma_utils.PlasmaArray(block_to_dataset_index) @property def slice_indices(self): return self._slice_indices.array @property def sizes(self): return self._sizes.array @property def block_to_dataset_index(self): return self._block_to_dataset_index.array def attr(self, attr: str, index: int): start_ds_idx, _, _ = self.block_to_dataset_index[index] return self.dataset.attr(attr, start_ds_idx) def __getitem__(self, index): start_ds_idx, start_offset, end_ds_idx = self.block_to_dataset_index[index] buffer = torch.cat( [self.dataset[idx] for idx in range(start_ds_idx, end_ds_idx + 1)] ) slice_s, slice_e = self.slice_indices[index] length = slice_e - slice_s s, e = start_offset, start_offset + length item = buffer[s:e] if self.include_targets: # *target* is the original sentence (=item) # *source* is shifted right by 1 (maybe left-padded with eos) # *past_target* is shifted right by 2 (left-padded as needed) if s == 0: source = torch.cat([item.new([self.eos]), buffer[0 : e - 1]]) past_target = torch.cat( [item.new([self.pad, self.eos]), buffer[0 : e - 2]] ) else: source = buffer[s - 1 : e - 1] if s == 1: past_target = torch.cat([item.new([self.eos]), buffer[0 : e - 2]]) else: past_target = buffer[s - 2 : e - 2] return source, item, past_target return item def __len__(self): return len(self.slice_indices) @property def supports_prefetch(self): return getattr(self.dataset, "supports_prefetch", False) def prefetch(self, indices): self.dataset.prefetch( { ds_idx for index in indices for start_ds_idx, _, end_ds_idx in [self.block_to_dataset_index[index]] for ds_idx in range(start_ds_idx, end_ds_idx + 1) } )
5,961
34.700599
96
py
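A rough, pure-numpy reconstruction (my assumption, since the repo delegates this to the Cython helper _get_slice_indices_fast) of what break_mode='none' produces above: the concatenated token stream is cut into fixed-size blocks of at most block_size tokens, with a shorter final block.

import numpy as np

def slice_indices_none(sizes, block_size):
    # concatenate all sentences into one stream and cut it into
    # equally sized blocks (the last block may be shorter)
    total = int(np.sum(sizes))
    starts = np.arange(0, total, block_size)
    ends = np.minimum(starts + block_size, total)
    return np.stack([starts, ends], axis=1)

print(slice_indices_none([5, 3, 9], block_size=4))
# 5 blocks: (0, 4) (4, 8) (8, 12) (12, 16) (16, 17)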
RegularizedBN
RegularizedBN-main/fairseq/data/subsample_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import numpy as np from . import BaseWrapperDataset logger = logging.getLogger(__name__) class SubsampleDataset(BaseWrapperDataset): """Subsamples a given dataset by a specified ratio. Subsampling is done on the number of examples Args: dataset (~torch.utils.data.Dataset): dataset to subsample size_ratio(float): the ratio to subsample to. must be between 0 and 1 (exclusive) """ def __init__(self, dataset, size_ratio): super().__init__(dataset) assert size_ratio < 1 self.actual_size = np.ceil(len(dataset) * size_ratio).astype(int) self.indices = np.random.choice( list(range(len(self.dataset))), self.actual_size, replace=False ) logger.info( "subsampled dataset from {} to {} (ratio={})".format( len(self.dataset), self.actual_size, size_ratio ) ) def __getitem__(self, index): return self.dataset[self.indices[index]] def __len__(self): return self.actual_size def collater(self, samples): return self.dataset.collater(samples) @property def sizes(self): return self.dataset.sizes[self.indices] @property def name(self): return self.dataset.name def num_tokens(self, index): return self.dataset.num_tokens(self.indices[index]) def size(self, index): return self.dataset.size(self.indices[index]) def ordered_indices(self): """Return an ordered list of indices. Batches will be constructed based on this order.""" if self.shuffle: order = [np.random.permutation(len(self))] else: order = [np.arange(len(self))] order.append(self.sizes) return np.lexsort(order) def prefetch(self, indices): self.dataset.prefetch(self.indices[indices])
2,103
28.222222
101
py
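A minimal sketch (not from the repo) of the subsampling step above: np.random.choice without replacement keeps ceil(len * size_ratio) distinct example indices. The dataset length and ratio are illustrative values.

import numpy as np

dataset_len = 1000
size_ratio = 0.1  # keep roughly 10% of the examples

actual_size = int(np.ceil(dataset_len * size_ratio))
indices = np.random.choice(dataset_len, actual_size, replace=False)

assert actual_size == 100
assert len(np.unique(indices)) == actual_size  # sampled without replacement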
RegularizedBN
RegularizedBN-main/fairseq/data/prepend_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch from . import BaseWrapperDataset class PrependDataset(BaseWrapperDataset): def __init__(self, dataset, prepend_getter, ensure_first_token_is=None): super().__init__(dataset) self.prepend_getter = prepend_getter self.ensure_first_token = ensure_first_token_is def __getitem__(self, idx): item = self.dataset[idx] is_tuple = isinstance(item, tuple) src = item[0] if is_tuple else item assert self.ensure_first_token is None or src[0] == self.ensure_first_token prepend_idx = self.prepend_getter(self.dataset, idx) assert isinstance(prepend_idx, int) src[0] = prepend_idx item = tuple((src,) + item[1:]) if is_tuple else src return item
953
31.896552
83
py
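A tiny sketch (not from the repo) of the behaviour of PrependDataset above: the prepend_getter result overwrites the existing first token in place, so the item keeps its length rather than growing by one. The token values and the language-id index are made up for illustration.

import torch

src = torch.LongTensor([0, 15, 27, 31, 2])  # position 0 assumed to hold a placeholder bos

def prepend_getter(dataset, idx):
    return 9000  # e.g. a per-sample language-id symbol (illustrative)

src[0] = prepend_getter(None, 0)
print(src, src.numel())  # tensor([9000, 15, 27, 31, 2]) 5, same length as before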
RegularizedBN
RegularizedBN-main/fairseq/data/base_wrapper_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from torch.utils.data.dataloader import default_collate from . import FairseqDataset class BaseWrapperDataset(FairseqDataset): def __init__(self, dataset): super().__init__() self.dataset = dataset def __getitem__(self, index): return self.dataset[index] def __len__(self): return len(self.dataset) def collater(self, samples): if hasattr(self.dataset, 'collater'): return self.dataset.collater(samples) else: return default_collate(samples) @property def sizes(self): return self.dataset.sizes def num_tokens(self, index): return self.dataset.num_tokens(index) def size(self, index): return self.dataset.size(index) def ordered_indices(self): return self.dataset.ordered_indices() @property def supports_prefetch(self): return getattr(self.dataset, 'supports_prefetch', False) def prefetch(self, indices): self.dataset.prefetch(indices) def get_batch_shapes(self): return self.dataset.get_batch_shapes() def batch_by_size( self, indices, max_tokens=None, max_sentences=None, required_batch_size_multiple=1, ): return self.dataset.batch_by_size( indices, max_tokens=max_tokens, max_sentences=max_sentences, required_batch_size_multiple=required_batch_size_multiple, ) def set_epoch(self, epoch): super().set_epoch(epoch) if hasattr(self.dataset, 'set_epoch'): self.dataset.set_epoch(epoch)
1,808
24.842857
70
py
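A standalone mimic (not using fairseq) of the delegation pattern that BaseWrapperDataset above implements: a wrapper overrides the one behaviour it cares about and forwards everything else to the wrapped dataset.

class ListDataset:
    def __init__(self, items):
        self.items = items
    def __getitem__(self, index):
        return self.items[index]
    def __len__(self):
        return len(self.items)
    def num_tokens(self, index):
        return 1

class DoublingWrapper:
    def __init__(self, dataset):
        self.dataset = dataset
    def __getitem__(self, index):
        return 2 * self.dataset[index]          # the only overridden behaviour
    def __len__(self):
        return len(self.dataset)
    def num_tokens(self, index):
        return self.dataset.num_tokens(index)   # plain pass-through, as above

wrapped = DoublingWrapper(ListDataset([1, 2, 3]))
print(wrapped[1], len(wrapped), wrapped.num_tokens(1))  # 4 3 1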