Dataset schema (per-row columns and observed value ranges):
    repo              string   length 1 to 99
    file              string   length 13 to 215
    code              string   length 12 to 59.2M
    file_length       int64    12 to 59.2M
    avg_line_length   float64  3.82 to 1.48M
    max_line_length   int64    12 to 2.51M
    extension_type    string   1 distinct class
AutoAE
AutoAE-master/get_record_list.py
import sys
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import torch
import torch.nn as nn
from torch.nn import functional as F
from PIL import Image
import numpy as np
import torchvision
import imageio
from torchvision import transforms
import argparse
from attack_ops import apply_attacker
from tqdm import tqdm
from tv_utils import ImageNet, Permute
import copy
import pickle
import random

gpu_idx = 0

class NormalizeByChannelMeanStd(nn.Module):
    def __init__(self, mean, std):
        super(NormalizeByChannelMeanStd, self).__init__()
        if not isinstance(mean, torch.Tensor):
            mean = torch.tensor(mean)
        if not isinstance(std, torch.Tensor):
            std = torch.tensor(std)
        self.register_buffer("mean", mean)
        self.register_buffer("std", std)

    def forward(self, tensor):
        return normalize_fn(tensor, self.mean, self.std)

    def extra_repr(self):
        return 'mean={}, std={}'.format(self.mean, self.std)

def normalize_fn(tensor, mean, std):
    """Differentiable version of torchvision.functional.normalize"""
    # here we assume the color channel is at dim=1
    mean = mean[None, :, None, None]
    std = std[None, :, None, None]
    return tensor.sub(mean).div(std)

def predict_from_logits(logits, dim=1):
    return logits.max(dim=dim, keepdim=False)[1]

def record_get_attacker_accuracy(model, new_attack, copy_acc_total):
    model.eval()
    model = model.to(device)
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.to(device)
    acc_curve = []
    acc_total = copy.deepcopy(copy_acc_total)
    total_record_list = []
    for i in range(new_attack['step']):
        total_record_list.append([])
    for _ in range(args.num_restarts):
        total_num = 0
        clean_acc_num = 0
        adv_acc_num = 0
        attack_successful_num = 0
        batch_idx = 0
        for loaded_data in tqdm(test_loader):
            test_images, test_labels = loaded_data[0], loaded_data[1]
            bstart = batch_idx * args.batch_size
            if test_labels.size(0) < args.batch_size:
                bend = batch_idx * args.batch_size + test_labels.size(0)
            else:
                bend = (batch_idx + 1) * args.batch_size
            test_images, test_labels = test_images.to(device), test_labels.to(device)
            total_num += test_labels.size(0)
            clean_logits = model(test_images)
            pred = predict_from_logits(clean_logits)
            acc_total[bstart:bend] = acc_total[bstart:bend] * (pred == test_labels).cpu().numpy()
            if len(test_images.shape) == 3:
                test_images = test_images.unsqueeze(0)
                test_labels = test_labels.unsqueeze(0)
            if len(test_labels.size()) == 0:
                clean_acc_num += 1
            else:
                clean_acc_num += test_labels.size(0)
            previous_p = None
            attack_name = new_attack['attacker']
            attack_eps = new_attack['magnitude']
            attack_steps = new_attack['step']
            adv_images, p, record_list = apply_attacker(test_images, attack_name, test_labels, model, attack_eps, previous_p, int(attack_steps), args.max_epsilon, _type=args.norm, gpu_idx=gpu_idx,)
            pred = predict_from_logits(model(adv_images.detach()))
            acc_total[bstart:bend] = acc_total[bstart:bend] * (pred == test_labels).cpu().numpy()
            batch_idx += 1
            print('accuracy_total: {}/{}'.format(int(acc_total.sum()), len(test_loader.dataset)))
            print('natural_acc_oneshot: ', clean_acc_num / total_num)
            print('robust_acc_oneshot: ', (total_num - len(test_loader.dataset) + acc_total.sum()) / total_num)
            total_record_list = np.hstack((total_record_list, record_list))
        acc_curve.append(acc_total.sum())
    with open("Record_list_" + attack_name + ".pkl", "wb") as f:
        pickle.dump(total_record_list, f)
    return acc_total

def record_append_next_attack(model, last_acc_total, t_max):
    max_result = 0
    original_accuracy = last_acc_total.sum()
    best_attacker = None
    best_acc_total = None
    best_t = None
    acc_total = copy.deepcopy(last_acc_total)
    for attack_idx in range(len(candidate_pool)):
        new_attack = candidate_pool[attack_idx]
        for t in range(1000, t_max + 1, 125):
            new_attack['step'] = t
            tmp_acc_total = record_get_attacker_accuracy(model, new_attack, acc_total)
            cur_result = abs(original_accuracy - tmp_acc_total.sum()) / t
            if cur_result > max_result:
                best_t = copy.deepcopy(t)
                best_acc_total = copy.deepcopy(tmp_acc_total)
                best_attacker = copy.deepcopy(new_attack)
    return [best_attacker, best_acc_total, best_t]

def record_greedy_algorithm(model):
    policy = []
    acc_total = np.ones(len(test_loader.dataset))
    t_max = 1000
    while t_max > 0:
        [next_attack, acc_total, t] = record_append_next_attack(model, acc_total, t_max)
        if next_attack is None:
            return policy
        policy.append(next_attack)
        t_max = t_max - t
    return policy

def get_5000_train_data(cifar10_train):
    selected_cifar_train = []
    for i in range(10):
        sub_selected_cifar_train = []
        for item in cifar10_train:
            if item[1] == i:
                sub_selected_cifar_train.append(item)
                if len(sub_selected_cifar_train) == 500:
                    break
        selected_cifar_train.extend(sub_selected_cifar_train)
    return selected_cifar_train

parser = argparse.ArgumentParser(description='Random search of Auto-attack')
parser.add_argument('--seed', type=int, default=2020, help='random seed')
parser.add_argument('--batch_size', type=int, default=256, metavar='N', help='batch size for data loader')
parser.add_argument('--dataset', default='cifar10', help='cifar10 | cifar100 | svhn | ile')
parser.add_argument('--num_classes', type=int, default=10, help='the # of classes')
parser.add_argument('--net_type', default='madry_adv_resnet50', help='resnet18 | resnet50 | inception_v3 | densenet121 | vgg16_bn')
parser.add_argument('--num_restarts', type=int, default=1, help='the # of classes')
parser.add_argument('--max_epsilon', type=float, default=8/255, help='the attack sequence length')
parser.add_argument('--ensemble', action='store_true', help='the attack sequence length')
parser.add_argument('--transfer_test', action='store_true', help='the attack sequence length')
parser.add_argument('--sub_net_type', default='madry_adv_resnet50', help='resnet18 | resnet50 | inception_v3 | densenet121 | vgg16_bn')
parser.add_argument('--target', action='store_true', default=False)
parser.add_argument('--norm', default='linf', help='linf | l2 | unrestricted')
parser.add_argument('--linf_attacker', default='Record_CWAttack_adaptive_stepsize_Linf', help='RecordFabAttack_Linf | RecordApgdDlrAttack_Linf | RecordApgdCeAttack_Linf | Record_CWAttack_adaptive_stepsize_Linf | RecordMultiTargetedAttack_Linf')
parser.add_argument('--l2_attacker', default='Record_CWAttack_adaptive_stepsize_L2', help='Record_CWAttack_adaptive_stepsize_L2 | RecordMultiTargetedAttack_L2 | RecordApgdCeAttack_L2 | RecordApgdDlrAttack_L2 | RecordFabAttack_L2 | Record_PGD_Attack_adaptive_stepsize_L2 | RecordDDNL2Attack_L2')
args = parser.parse_args()
print(args)

RecordMultiTargetedAttack_L2 = {'attacker': 'RecordMultiTargetedAttack', 'magnitude': 0.5, 'step': 50}
RecordMultiTargetedAttack_Linf = {'attacker': 'RecordMultiTargetedAttack', 'magnitude': 8/255, 'step': 50}
Record_CWAttack_adaptive_stepsize_L2 = {'attacker': 'Record_CW_Attack_adaptive_stepsize', 'magnitude': 0.5, 'step': 50}
Record_CWAttack_adaptive_stepsize_Linf = {'attacker': 'Record_CW_Attack_adaptive_stepsize', 'magnitude': 8/255, 'step': 50}
Record_PGD_Attack_adaptive_stepsize_L2 = {'attacker': 'Record_PGD_Attack_adaptive_stepsize', 'magnitude': 0.5, 'step': 50}
RecordDDNL2Attack_L2 = {'attacker': 'RecordDDNL2Attack', 'magnitude': None, 'step': 50}
RecordApgdCeAttack_L2 = {'attacker': 'RecordApgdCeAttack', 'magnitude': 0.5, 'step': 50}
RecordApgdCeAttack_Linf = {'attacker': 'RecordApgdCeAttack', 'magnitude': 8/255, 'step': 50}
RecordApgdDlrAttack_L2 = {'attacker': 'RecordApgdDlrAttack', 'magnitude': 0.5, 'step': 50}
RecordApgdDlrAttack_Linf = {'attacker': 'RecordApgdDlrAttack', 'magnitude': 8/255, 'step': 50}
RecordFabAttack_L2 = {'attacker': 'RecordFabAttack', 'magnitude': 0.5, 'step': 50}
RecordFabAttack_Linf = {'attacker': 'RecordFabAttack', 'magnitude': 8/255, 'step': 50}

if args.norm == 'linf':
    if args.linf_attacker == 'RecordFabAttack_Linf':
        candidate_pool = [RecordFabAttack_Linf]
    elif args.linf_attacker == 'RecordApgdDlrAttack_Linf':
        candidate_pool = [RecordApgdDlrAttack_Linf]
    elif args.linf_attacker == 'RecordApgdCeAttack_Linf':
        candidate_pool = [RecordApgdCeAttack_Linf]
    elif args.linf_attacker == 'Record_CWAttack_adaptive_stepsize_Linf':
        candidate_pool = [Record_CWAttack_adaptive_stepsize_Linf]
    elif args.linf_attacker == 'RecordMultiTargetedAttack_Linf':
        candidate_pool = [RecordMultiTargetedAttack_Linf]
elif args.norm == 'l2':
    if args.l2_attacker == 'RecordDDNL2Attack_L2':
        candidate_pool = [RecordDDNL2Attack_L2]
    elif args.l2_attacker == 'Record_PGD_Attack_adaptive_stepsize_L2':
        candidate_pool = [Record_PGD_Attack_adaptive_stepsize_L2]
    elif args.l2_attacker == 'RecordFabAttack_L2':
        candidate_pool = [RecordFabAttack_L2]
    elif args.l2_attacker == 'RecordApgdDlrAttack_L2':
        candidate_pool = [RecordApgdDlrAttack_L2]
    elif args.l2_attacker == 'RecordApgdCeAttack_L2':
        candidate_pool = [RecordApgdCeAttack_L2]
    elif args.l2_attacker == 'RecordMultiTargetedAttack_L2':
        candidate_pool = [RecordMultiTargetedAttack_L2]
    elif args.l2_attacker == 'Record_CWAttack_adaptive_stepsize_L2':
        candidate_pool = [Record_CWAttack_adaptive_stepsize_L2]
print('candidate_pool: ', candidate_pool)

device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device('cpu')

if args.dataset == 'cifar10':
    args.num_classes = 10
    cifar10_train = torchvision.datasets.CIFAR10(root='/root/project/data/cifar10', train=True, transform=transforms.ToTensor())
    selected_cifar_train = get_5000_train_data(cifar10_train)
    test_loader = torch.utils.data.DataLoader(selected_cifar_train, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=8)

if args.net_type == 'madry_adv_resnet50':
    from cifar_models.resnet import resnet50
    model = resnet50()
    model.load_state_dict({k[13:]: v for k, v in torch.load('./checkpoints/cifar_linf_8.pt')['state_dict'].items() if 'attacker' not in k and 'new' not in k})
    normalize = NormalizeByChannelMeanStd(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010])
    model = nn.Sequential(normalize, model)
elif args.net_type == 'madry_adv_resnet50_l2':
    from cifar_models.resnet import resnet50
    model = resnet50()
    model.load_state_dict({k[13:]: v for k, v in torch.load('./checkpoints/cifar_l2_0_5.pt')['state_dict'].items() if 'attacker' not in k and 'new' not in k})
    normalize = NormalizeByChannelMeanStd(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010])
    model = nn.Sequential(normalize, model)
else:
    raise Exception('The net_type of {} is not supported by now!'.format(args.net_type))

result_policy = record_greedy_algorithm(model)
10,906
44.827731
187
py
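The greedy policy search in get_record_list.py scores each candidate attacker by the accuracy drop it causes divided by the iteration budget it spends. A minimal, self-contained sketch of that criterion follows; the candidate names and numbers are invented for illustration, not taken from any real run.

# Hedged sketch of the per-step-efficiency criterion used by record_append_next_attack.
# All values below are hypothetical.
candidates = [
    {"attacker": "Record_CW_Attack_adaptive_stepsize", "acc_after": 212.0, "steps": 1000},
    {"attacker": "RecordApgdCeAttack", "acc_after": 230.0, "steps": 875},
]
acc_before = 260.0  # robust-accuracy count before appending a new attacker

def efficiency(candidate):
    # Mirrors abs(original_accuracy - tmp_acc_total.sum()) / t in the script above.
    return abs(acc_before - candidate["acc_after"]) / candidate["steps"]

best = max(candidates, key=efficiency)
print(best["attacker"], efficiency(best))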
AutoAE
AutoAE-master/attack_utils.py
import collections.abc
import torch
import torch.nn as nn
# from torch.autograd.gradcheck import zero_gradients
import numpy as np

device = torch.device("cuda") if torch.cuda.is_available() else torch.device('cpu')

def zero_gradients(x):
    if isinstance(x, torch.Tensor):
        if x.grad is not None:
            x.grad.detach_()
            x.grad.zero_()
    elif isinstance(x, collections.abc.Iterable):
        for elem in x:
            zero_gradients(elem)

def predict_from_logits(logits, dim=1):
    return logits.max(dim=dim, keepdim=False)[1]

def normalize_fn(tensor, mean, std):
    mean = mean[None, :, None, None]
    std = std[None, :, None, None]
    return tensor.sub(mean).div(std)

class NormalizeByChannelMeanStd(nn.Module):
    def __init__(self, mean, std):
        super(NormalizeByChannelMeanStd, self).__init__()
        if not isinstance(mean, torch.Tensor):
            mean = torch.tensor(mean)
        if not isinstance(std, torch.Tensor):
            std = torch.tensor(std)
        self.register_buffer("mean", mean)
        self.register_buffer("std", std)

    def forward(self, tensor):
        return normalize_fn(tensor, self.mean, self.std)

    def extra_repr(self):
        return 'mean={}, std={}'.format(self.mean, self.std)

def get_diff_logits_grads_batch(model, imgs, la):
    im = imgs.clone().requires_grad_()
    with torch.enable_grad():
        y = model(im)
    g2 = torch.zeros([y.shape[-1], *imgs.size()]).to(device)
    grad_mask = torch.zeros_like(y)
    for counter in range(y.shape[-1]):
        zero_gradients(im)
        grad_mask[:, counter] = 1.0
        y.backward(grad_mask, retain_graph=True)
        grad_mask[:, counter] = 0.0
        g2[counter] = im.grad.data
    g2 = torch.transpose(g2, 0, 1).detach()
    y2 = model(imgs).detach()
    df = y2 - y2[torch.arange(imgs.shape[0]), la].unsqueeze(1)
    dg = g2 - g2[torch.arange(imgs.shape[0]), la].unsqueeze(1)
    df[torch.arange(imgs.shape[0]), la] = 1e10
    return df, dg

def check_shape(x):
    return x if len(x.shape) > 0 else x.unsqueeze(0)

def dlr_loss(x, y, y_target):
    x_sorted, ind_sorted = x.sort(dim=1)
    return -(x[np.arange(x.shape[0]), y] - x[np.arange(x.shape[0]), y_target]) / (x_sorted[:, -1] - .5 * x_sorted[:, -3] - .5 * x_sorted[:, -4] + 1e-12)

def projection_linf(points_to_project, w_hyperplane, b_hyperplane):
    t = points_to_project.clone()
    w = w_hyperplane.clone()
    b = b_hyperplane.clone()
    ind2 = ((w * t).sum(1) - b < 0).nonzero().squeeze()
    ind2 = check_shape(ind2)
    w[ind2] *= -1
    b[ind2] *= -1
    c5 = (w < 0).float()
    a = torch.ones(t.shape).to(device)
    d = (a * c5 - t) * (w != 0).float()
    a -= a * (1 - c5)
    p = torch.ones(t.shape).to(device) * c5 - t * (2 * c5 - 1)
    _, indp = torch.sort(p, dim=1)
    b = b - (w * t).sum(1)
    b0 = (w * d).sum(1)
    b1 = b0.clone()
    counter = 0
    indp2 = torch.flip(indp.unsqueeze(-1), dims=(1, 2)).squeeze()
    u = torch.arange(0, w.shape[0])
    ws = w[u.unsqueeze(1), indp2]
    bs2 = - ws * d[u.unsqueeze(1), indp2]
    s = torch.cumsum(ws.abs(), dim=1)
    sb = torch.cumsum(bs2, dim=1) + b0.unsqueeze(1)
    c = b - b1 > 0
    b2 = sb[u, -1] - s[u, -1] * p[u, indp[u, 0]]
    c_l = (b - b2 > 0).nonzero().squeeze()
    c2 = ((b - b1 > 0) * (b - b2 <= 0)).nonzero().squeeze()
    c_l = check_shape(c_l)
    c2 = check_shape(c2)
    lb = torch.zeros(c2.shape[0])
    ub = torch.ones(c2.shape[0]) * (w.shape[1] - 1)
    nitermax = torch.ceil(torch.log2(torch.tensor(w.shape[1]).float()))
    counter2 = torch.zeros(lb.shape).long()
    while counter < nitermax:
        counter4 = torch.floor((lb + ub) / 2)
        counter2 = counter4.long()
        indcurr = indp[c2, -counter2 - 1]
        b2 = sb[c2, counter2] - s[c2, counter2] * p[c2, indcurr]
        c = b[c2] - b2 > 0
        ind3 = c.nonzero().squeeze()
        ind32 = (~c).nonzero().squeeze()
        ind3 = check_shape(ind3)
        ind32 = check_shape(ind32)
        lb[ind3] = counter4[ind3]
        ub[ind32] = counter4[ind32]
        counter += 1
    lb = lb.long()
    counter2 = 0
    if c_l.nelement() != 0:
        lmbd_opt = (torch.max((b[c_l] - sb[c_l, -1]) / (-s[c_l, -1]), torch.zeros(sb[c_l, -1].shape).to(device))).unsqueeze(-1)
        d[c_l] = (2 * a[c_l] - 1) * lmbd_opt
    lmbd_opt = (torch.max((b[c2] - sb[c2, lb]) / (-s[c2, lb]), torch.zeros(sb[c2, lb].shape).to(device))).unsqueeze(-1)
    d[c2] = torch.min(lmbd_opt, d[c2]) * c5[c2] + torch.max(-lmbd_opt, d[c2]) * (1 - c5[c2])
    return d * (w != 0).float()
4,672
31.227586
152
py
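A hedged usage sketch for the dlr_loss helper defined in attack_utils.py; the shapes, labels, and target choice below are made up, and the import assumes attack_utils.py is on the Python path.

# Illustrative call to dlr_loss on random logits (values are arbitrary).
import torch
from attack_utils import dlr_loss  # assumes attack_utils.py is importable

logits = torch.randn(4, 10)            # batch of 4, 10 classes
labels = torch.randint(0, 10, (4,))    # ground-truth classes
targets = (labels + 1) % 10            # arbitrary target classes
loss = dlr_loss(logits, labels, targets)
print(loss.shape)                      # per-example targeted DLR loss, shape (4,)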
AutoAE
AutoAE-master/tv_utils.py
from PIL import Image
import PIL
from PIL import ImageFilter
import numbers
import torchvision.transforms.functional as TF
from torch.utils.data import Dataset
import numpy as np
import os
import torch
import torch.nn as nn
import torchvision.datasets

_pil_interpolation_to_str = {
    Image.NEAREST: 'PIL.Image.NEAREST',
    Image.BILINEAR: 'PIL.Image.BILINEAR',
    Image.BICUBIC: 'PIL.Image.BICUBIC',
    Image.LANCZOS: 'PIL.Image.LANCZOS',
}

IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')

class Permute(nn.Module):
    def __init__(self, permutation=[2, 1, 0]):
        super().__init__()
        self.permutation = permutation

    def forward(self, input):
        return input[:, self.permutation]

class ImageNet(Dataset):
    def __init__(self, root_dir, csv_name='labels', transform=None):
        self.transform = transform
        self.datas = []
        with open(os.path.join(root_dir, csv_name)) as f:
            for line in f.readlines():
                img_path, gt_label = line.strip().split(' ')
                self.datas.append((os.path.join(root_dir, img_path), int(gt_label)))

    def __len__(self):
        l = len(self.datas)
        return l

    def __getitem__(self, idx):
        filename, label_source = self.datas[idx]
        # filename = os.path.join(self.image_dir, self.labels.at[idx, 'ImageId'])
        in_img_t = Image.open(filename)
        if self.transform is not None:
            in_img_t = self.transform(in_img_t)
        return in_img_t, label_source

class GaussianSmoothing(object):
    def __init__(self, radius):
        if isinstance(radius, numbers.Number):
            self.min_radius = radius
            self.max_radius = radius
        elif isinstance(radius, list):
            if len(radius) != 2:
                raise Exception("`radius` should be a number or a list of two numbers")
            if radius[1] < radius[0]:
                raise Exception("radius[0] should be <= radius[1]")
            self.min_radius = radius[0]
            self.max_radius = radius[1]
        else:
            raise Exception("`radius` should be a number or a list of two numbers")

    def __call__(self, image):
        radius = np.random.uniform(self.min_radius, self.max_radius)
        return image.filter(ImageFilter.GaussianBlur(radius))

class SpatialAffine(object):
    def __init__(self, degrees, translate=None, scale=None, shear=None, resample=False, fillcolor=0):
        self.degrees = degrees
        if translate is not None:
            assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
                "translate should be a list or tuple and it must be of length 2."
        self.translate = translate
        self.scale = scale
        self.shear = shear
        self.resample = resample
        self.fillcolor = fillcolor

    @staticmethod
    def get_params(degrees, translate, scale_ranges, shears, img_size):
        """Get parameters for affine transformation

        Returns:
            sequence: params to be passed to the affine transformation
        """
        angle = degrees
        if translate is not None:
            max_dx = translate[0]
            max_dy = translate[1]
            translations = (max_dx, max_dy)
        else:
            translations = (0, 0)
        if scale_ranges is not None:
            scale = scale_ranges
        else:
            scale = 1.0
        if shears is not None:
            shear = shears
        else:
            shear = 0.0
        return angle, translations, scale, shear

    def __call__(self, img):
        """
        img (PIL Image): Image to be transformed.

        Returns:
            PIL Image: Affine transformed image.
        """
        ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img.size)
        return TF.affine(img, *ret, resample=self.resample, fillcolor=self.fillcolor)

    def __repr__(self):
        s = '{name}(degrees={degrees}'
        if self.translate is not None:
            s += ', translate={translate}'
        if self.scale is not None:
            s += ', scale={scale}'
        if self.shear is not None:
            s += ', shear={shear}'
        if self.resample > 0:
            s += ', resample={resample}'
        if self.fillcolor != 0:
            s += ', fillcolor={fillcolor}'
        s += ')'
        d = dict(self.__dict__)
        d['resample'] = _pil_interpolation_to_str[d['resample']]
        return s.format(name=self.__class__.__name__, **d)
4,615
30.616438
101
py
AutoAE
AutoAE-master/fab_projections.py
import math
import torch
from torch.nn import functional as F

def fab_projection_linf(points_to_project, w_hyperplane, b_hyperplane):
    device = points_to_project.device
    t, w, b = points_to_project, w_hyperplane.clone(), b_hyperplane.clone()
    sign = 2 * ((w * t).sum(1) - b >= 0) - 1
    w.mul_(sign.unsqueeze(1))
    b.mul_(sign)
    a = (w < 0).float()
    d = (a - t) * (w != 0).float()
    p = a - t * (2 * a - 1)
    indp = torch.argsort(p, dim=1)
    b = b - (w * t).sum(1)
    b0 = (w * d).sum(1)
    indp2 = indp.flip((1,))
    ws = w.gather(1, indp2)
    bs2 = - ws * d.gather(1, indp2)
    s = torch.cumsum(ws.abs(), dim=1)
    sb = torch.cumsum(bs2, dim=1) + b0.unsqueeze(1)
    b2 = sb[:, -1] - s[:, -1] * p.gather(1, indp[:, 0:1]).squeeze(1)
    c_l = b - b2 > 0
    c2 = (b - b0 > 0) & (~c_l)
    lb = torch.zeros(c2.sum(), device=device)
    ub = torch.full_like(lb, w.shape[1] - 1)
    nitermax = math.ceil(math.log2(w.shape[1]))
    indp_, sb_, s_, p_, b_ = indp[c2], sb[c2], s[c2], p[c2], b[c2]
    for counter in range(nitermax):
        counter4 = torch.floor((lb + ub) / 2)
        counter2 = counter4.long().unsqueeze(1)
        indcurr = indp_.gather(1, indp_.size(1) - 1 - counter2)
        b2 = (sb_.gather(1, counter2) - s_.gather(1, counter2) * p_.gather(1, indcurr)).squeeze(1)
        c = b_ - b2 > 0
        lb = torch.where(c, counter4, lb)
        ub = torch.where(c, ub, counter4)
    lb = lb.long()
    if c_l.any():
        lmbd_opt = torch.clamp_min((b[c_l] - sb[c_l, -1]) / (-s[c_l, -1]), min=0).unsqueeze(-1)
        d[c_l] = (2 * a[c_l] - 1) * lmbd_opt
    lmbd_opt = torch.clamp_min((b[c2] - sb[c2, lb]) / (-s[c2, lb]), min=0).unsqueeze(-1)
    d[c2] = torch.min(lmbd_opt, d[c2]) * a[c2] + torch.max(-lmbd_opt, d[c2]) * (1 - a[c2])
    return d * (w != 0).float()

def projection_l2(points_to_project, w_hyperplane, b_hyperplane):
    device = points_to_project.device
    t, w, b = points_to_project, w_hyperplane.clone(), b_hyperplane
    c = (w * t).sum(1) - b
    ind2 = 2 * (c >= 0) - 1
    w.mul_(ind2.unsqueeze(1))
    c.mul_(ind2)
    r = torch.max(t / w, (t - 1) / w).clamp(min=-1e12, max=1e12)
    r.masked_fill_(w.abs() < 1e-8, 1e12)
    r[r == -1e12] *= -1
    rs, indr = torch.sort(r, dim=1)
    rs2 = F.pad(rs[:, 1:], (0, 1))
    rs.masked_fill_(rs == 1e12, 0)
    rs2.masked_fill_(rs2 == 1e12, 0)
    w3s = (w ** 2).gather(1, indr)
    w5 = w3s.sum(dim=1, keepdim=True)
    ws = w5 - torch.cumsum(w3s, dim=1)
    d = -(r * w)
    d.mul_((w.abs() > 1e-8).float())
    s = torch.cat((-w5 * rs[:, 0:1], torch.cumsum((-rs2 + rs) * ws, dim=1) - w5 * rs[:, 0:1]), 1)
    c4 = s[:, 0] + c < 0
    c3 = (d * w).sum(dim=1) + c > 0
    c2 = ~(c4 | c3)
    lb = torch.zeros(c2.sum(), device=device)
    ub = torch.full_like(lb, w.shape[1] - 1)
    nitermax = math.ceil(math.log2(w.shape[1]))
    s_, c_ = s[c2], c[c2]
    for counter in range(nitermax):
        counter4 = torch.floor((lb + ub) / 2)
        counter2 = counter4.long().unsqueeze(1)
        c3 = s_.gather(1, counter2).squeeze(1) + c_ > 0
        lb = torch.where(c3, counter4, lb)
        ub = torch.where(c3, ub, counter4)
    lb = lb.long()
    if c4.any():
        alpha = c[c4] / w5[c4].squeeze(-1)
        d[c4] = -alpha.unsqueeze(-1) * w[c4]
    if c2.any():
        alpha = (s[c2, lb] + c[c2]) / ws[c2, lb] + rs[c2, lb]
        alpha[ws[c2, lb] == 0] = 0
        c5 = (alpha.unsqueeze(-1) > r[c2]).float()
        d[c2] = d[c2] * c5 - alpha.unsqueeze(-1) * w[c2] * (1 - c5)
    return d * (w.abs() > 1e-8).float()

def projection_l1(points_to_project, w_hyperplane, b_hyperplane):
    device = points_to_project.device
    t, w, b = points_to_project, w_hyperplane.clone(), b_hyperplane
    c = (w * t).sum(1) - b
    ind2 = 2 * (c >= 0) - 1
    w.mul_(ind2.unsqueeze(1))
    c.mul_(ind2)
    r = (1 / w).abs().clamp_max(1e12)
    indr = torch.argsort(r, dim=1)
    indr_rev = torch.argsort(indr)
    c6 = (w < 0).float()
    d = (-t + c6) * (w != 0).float()
    ds = torch.min(-w * t, w * (1 - t)).gather(1, indr)
    ds2 = torch.cat((c.unsqueeze(-1), ds), 1)
    s = torch.cumsum(ds2, dim=1)
    c2 = s[:, -1] < 0
    lb = torch.zeros(c2.sum(), device=device)
    ub = torch.full_like(lb, s.shape[1])
    nitermax = math.ceil(math.log2(w.shape[1]))
    s_ = s[c2]
    for counter in range(nitermax):
        counter4 = torch.floor((lb + ub) / 2)
        counter2 = counter4.long().unsqueeze(1)
        c3 = s_.gather(1, counter2).squeeze(1) > 0
        lb = torch.where(c3, counter4, lb)
        ub = torch.where(c3, ub, counter4)
    lb2 = lb.long()
    if c2.any():
        indr = indr[c2].gather(1, lb2.unsqueeze(1)).squeeze(1)
        u = torch.arange(0, w.shape[0], device=device).unsqueeze(1)
        u2 = torch.arange(0, w.shape[1], device=device, dtype=torch.float).unsqueeze(0)
        alpha = -s[c2, lb2] / w[c2, indr]
        c5 = u2 < lb.unsqueeze(-1)
        u3 = c5[u[:c5.shape[0]], indr_rev[c2]]
        d[c2] = d[c2] * u3.float()
        d[c2, indr] = alpha
    return d * (w.abs() > 1e-8).float()
5,081
29.987805
98
py
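A hedged calling-convention sketch for fab_projection_linf: to my reading it returns, for each point t, a shift d such that t + d lies on the hyperplane w·x = b while respecting the [0, 1] box, but treat that interpretation as an assumption. The shapes and values below are illustrative only.

# Shape-level usage sketch (assumes fab_projections.py is importable; values are random).
import torch
from fab_projections import fab_projection_linf

t = torch.rand(8, 3 * 32 * 32)     # flattened points in [0, 1]
w = torch.randn(8, 3 * 32 * 32)    # one hyperplane normal per point
b = torch.randn(8)                 # hyperplane offsets
d = fab_projection_linf(t, w, b)
print(d.shape)                     # same shape as t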
AutoAE
AutoAE-master/cifar_models/resnet.py
'''ResNet in PyTorch.

For Pre-activation ResNet, see 'preact_resnet.py'.

Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
    Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F

class SequentialWithArgs(torch.nn.Sequential):
    def forward(self, input, *args, **kwargs):
        vs = list(self._modules.values())
        l = len(vs)
        for i in range(l):
            if i == l - 1:
                input = vs[i](input, *args, **kwargs)
            else:
                input = vs[i](input)
        return input

class FakeReLU(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input):
        return input.clamp(min=0)

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output

class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes))

    def forward(self, x, fake_relu=False):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        if fake_relu:
            return FakeReLU.apply(out)
        return F.relu(out)

class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x, fake_relu=False):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        if fake_relu:
            return FakeReLU.apply(out)
        return F.relu(out)

class ResNet(nn.Module):
    # feat_scale lets us deal with CelebA, other non-32x32 datasets
    def __init__(self, block, num_blocks, num_classes=10, feat_scale=1, wm=1):
        super(ResNet, self).__init__()
        widths = [64, 128, 256, 512]
        widths = [int(w * wm) for w in widths]
        self.in_planes = widths[0]
        self.conv1 = nn.Conv2d(3, self.in_planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(self.in_planes)
        self.layer1 = self._make_layer(block, widths[0], num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, widths[1], num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, widths[2], num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, widths[3], num_blocks[3], stride=2)
        self.linear = nn.Linear(feat_scale*widths[3]*block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return SequentialWithArgs(*layers)

    def forward(self, x, with_latent=False, fake_relu=False, no_relu=False):
        assert (not no_relu), \
            "no_relu not yet supported for this architecture"
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out, fake_relu=fake_relu)
        out = F.avg_pool2d(out, 4)
        pre_out = out.view(out.size(0), -1)
        final = self.linear(pre_out)
        if with_latent:
            return final, pre_out
        return final

def ResNet18(**kwargs):
    return ResNet(BasicBlock, [2,2,2,2], **kwargs)

def ResNet18Wide(**kwargs):
    return ResNet(BasicBlock, [2,2,2,2], wm=5, **kwargs)

def ResNet18Thin(**kwargs):
    return ResNet(BasicBlock, [2,2,2,2], wd=.75, **kwargs)

def ResNet34(**kwargs):
    return ResNet(BasicBlock, [3,4,6,3], **kwargs)

def ResNet50(**kwargs):
    return ResNet(Bottleneck, [3,4,6,3], **kwargs)

def ResNet101(**kwargs):
    return ResNet(Bottleneck, [3,4,23,3], **kwargs)

def ResNet152(**kwargs):
    return ResNet(Bottleneck, [3,8,36,3], **kwargs)

resnet50 = ResNet50
resnet18 = ResNet18
resnet101 = ResNet101
resnet152 = ResNet152
resnet18wide = ResNet18Wide
# resnet18thin = ResNet18Thin

def test():
    net = ResNet18()
    y = net(torch.randn(1,3,32,32))
    print(y.size())
5,685
34.31677
102
py
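The file's own test() helper runs ResNet18 on a random CIFAR-sized tensor; the equivalent smoke test for the resnet50 alias that get_record_list.py loads looks like this (batch size and num_classes are chosen arbitrarily for illustration).

# Quick smoke test for the resnet50 alias, mirroring test() in cifar_models/resnet.py.
import torch
from cifar_models.resnet import resnet50

net = resnet50(num_classes=10)
logits = net(torch.randn(2, 3, 32, 32))
print(logits.size())  # expected: torch.Size([2, 10])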
Riptide
Riptide-master/scripts/autotune_model.py
import os
import numpy as np
import tensorflow as tf
import argparse
import tvm
from tvm import autotvm
from tvm import relay
from tvm.relay import testing
import tvm.relay.testing.tf as tf_testing
from tvm.autotvm.tuner import XGBTuner, GATuner, GridSearchTuner
from tvm.contrib.util import tempdir
import tvm.contrib.graph_runtime as runtime
import riptide.models
from riptide.get_models import get_model
from riptide.binary.binary_layers import Config, DQuantize, XQuantize

os.environ["CUDA_VISIBLE_DEVICES"] = ''

parser = argparse.ArgumentParser()
parser.add_argument('--activation_bits', type=int, default=1, help='number of activation bits', required=False)
parser.add_argument('--model', type=str, choices=['vggnet', 'vgg11', 'resnet18', 'alexnet', 'darknet'], help='neural network model', required=True)
parser.add_argument('--trials', type=int, default=50, help='number of tuning trials', required=False)
parser.add_argument('--tuner', type=str, default='xgb', choices=['xgb', 'random', 'grid'], help='autotvm tuning algorithm.', required=False)
parser.add_argument('--log_file', type=str, default='log.log', help='logfile to store tuning results', required=False)
args = parser.parse_args()

model = args.model
activation_bits = args.activation_bits
trials = args.trials
tuner = args.tuner
log_file = args.log_file

config = Config(actQ=DQuantize, weightQ=XQuantize, bits=activation_bits, use_act=False, use_bn=False, use_maxpool=True)
with config:
    model = get_model(model)
    #model = riptide.models.normal_vggnet.vggnet()

# Init model shapes.
input_shape = [1, 224, 224, 3]
test_input = tf.keras.Input(shape=[224, 224, 3], batch_size=1, dtype='float32')
output = model(test_input)
print("Test run of model", output)

# Parse model to relay
net, params = relay.frontend.from_keras(model, shape={'input_1': input_shape})

num_threads = 12
os.environ["TVM_NUM_THREADS"] = str(num_threads)
target = "llvm"
ctx = tvm.cpu(0)

# Set up tuning options
tuning_option = {
    'log_filename': log_file,
    'tuner': tuner,
    'early_stopping': None,
    'n_trial': trials,
    'measure_option': autotvm.measure_option(
        builder=autotvm.LocalBuilder(),
        runner=autotvm.LocalRunner(number=10, repeat=1, min_repeat_ms=150),
    ),
}

def tune_kernels(tasks, measure_option, tuner='xgb', n_trial=100, early_stopping=None, log_filename='tuning.log'):
    for i, tsk in enumerate(tasks):
        prefix = "[Task %2d/%2d] " % (i+1, len(tasks))
        # Converting conv2d tasks to conv2d_NCHWc tasks. Do we actually want this?
        op_name = tsk.workload[0]
        input_shape = tsk.workload[1][0:-1]
        kernel_shape = tsk.workload[2][0:-1]
        input_channels = input_shape[1]
        # Only can convert to NCHWc if input channels is divisible by 8.
        #convertible = input_channels % 8 == 0
        func_create = tsk.name
        #if op_name == 'conv2d':
        #    func_create = 'topi_x86_conv2d_NCHWc'
        #elif op_name == 'depthwise_conv2d_nchw':
        #    func_create = 'topi_x86_depthwise_conv2d_NCHWc_from_nchw'
        print(func_create)
        task = autotvm.task.create(func_create, args=tsk.args, target=target, template_key='direct')
        task.workload = tsk.workload
        # Create tuner.
        if tuner == 'xgb' or tuner == 'xbg-rank':
            tuner_obj = XGBTuner(task, loss_type='rank')
        elif tuner == 'ga':
            tuner_obj = GATuner(task, pop_size=50)
        elif tuner == 'gridsearch':
            tuner_obj = GridSearchTuner(task)
        else:
            raise ValueError("Invalid tuner: " + tuner)
        # Do tuning.
        n_trial = min(n_trial, len(task.config_space))
        tuner_obj.tune(n_trial=n_trial,
                       early_stopping=early_stopping,
                       measure_option=measure_option,
                       callbacks=[
                           autotvm.callback.progress_bar(n_trial, prefix=prefix),
                           autotvm.callback.log_to_file(log_filename)])

# Launch jobs and evaluate performance.
def tune_and_evaluate(tuning_opt):
    print("Extract tasks...")
    global net, params, input_shape
    tasks = autotvm.task.extract_from_program(
        net, target=target, params=params,
        ops=(relay.op.nn.conv2d, relay.op.nn.dense, relay.op.nn.bitserial_conv2d, relay.op.nn.bitserial_dense))

    # Run tuning tasks.
    print("Tuning...")
    tune_kernels(tasks, **tuning_opt)

    # compile kernels with history best records.
    with autotvm.apply_history_best(log_file):
        print("Compile...")
        with relay.build_config(opt_level=2):
            graph, lib, params = relay.build_module.build(net, target=target, params=params)

        # Upload parameters to device.
        ctx = tvm.cpu()
        data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype('float32'))
        module = runtime.create(graph, lib, ctx)
        module.set_input('input_1', data_tvm)
        module.set_input(**params)

        # evaluate
        print("Evaluate inference time cost...")
        ftimer = module.module.time_evaluator("run", ctx, number=10, repeat=1)
        prof_res = np.array(ftimer().results) * 1000  # Convert to milliseconds
        print("Mean inference time (std dev): %.2f ms (%.2f ms)" % (np.mean(prof_res), np.std(prof_res)))

tune_and_evaluate(tuning_option)
5,505
34.070064
147
py
Riptide
Riptide-master/scripts/train_imagenet.py
import os
import multiprocessing
import tensorflow as tf
import tensorflow_datasets as tfds
from functools import partial
from riptide.get_models import get_model
from riptide.utils.thread_helper import setup_gpu_threadpool
from riptide.binary.binary_layers import Config, DQuantize, XQuantize
from riptide.utils.preprocessing.inception_preprocessing import preprocess_image
from absl import app
from absl import flags
from absl import logging

FLAGS = flags.FLAGS
flags.DEFINE_string('model_dir', '/data/jwfromm/models', 'Directory to save models in.')
flags.DEFINE_string('model', '', 'Name of model to train, must be set.')
flags.DEFINE_string('experiment', '', 'Suffix to add to model name, should describe purpose of run.')
flags.DEFINE_string('gpus', '', 'Comma separated list of GPUS to run on.')
flags.DEFINE_integer('epochs', 480, 'Number of epochs to train.')
flags.DEFINE_integer('batch_size', 64, 'Size of each minibatch.')
flags.DEFINE_integer('image_size', 224, 'Height and Width of processed images.')
flags.DEFINE_float('learning_rate', .0128, 'Starting learning rate.')
flags.DEFINE_float('wd', 1e-4, 'Weight decay loss coefficient.')
flags.DEFINE_float('momentum', 0.9, 'Momentum used for optimizer.')
flags.DEFINE_bool('binary', 0, 'Use a binary network.')
flags.DEFINE_float('bits', 2.0, 'Number of activation bits to use for binary model.')

def main(argv):
    # Set visible GPUS appropriately.
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpus
    # Get thread configuration.
    op_threads, num_workers = setup_gpu_threadpool(len(FLAGS.gpus.split(',')))
    num_gpus = len(FLAGS.gpus.split(','))

    # Set up the data input functions.
    def train_preprocess(data):
        img = preprocess_image(data["image"], height=FLAGS.image_size, width=FLAGS.image_size, is_training=True)
        return img, data["label"]

    def eval_preprocess(data):
        img = preprocess_image(data["image"], height=FLAGS.image_size, width=FLAGS.image_size, is_training=False)
        return img, data["label"]

    def train_input_fn():
        ds = tfds.load("imagenet2012:5.0.0", split=tfds.Split.TRAIN, shuffle_files=True)
        ds = ds.shuffle(buffer_size=10000)
        ds = ds.map(train_preprocess, num_parallel_calls=tf.data.experimental.AUTOTUNE)
        ds = ds.batch(FLAGS.batch_size)
        ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
        return ds.repeat(FLAGS.epochs)

    def eval_input_fn():
        ds = tfds.load("imagenet2012:5.0.0", split=tfds.Split.VALIDATION)
        ds = ds.map(eval_preprocess, num_parallel_calls=tf.data.experimental.AUTOTUNE)
        ds = ds.batch(FLAGS.batch_size)
        ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
        return ds

    # Set up estimator model function.
    def model_fn(features, labels, mode):
        # Generate summary for input images.
        tf.compat.v1.summary.image('images', features, max_outputs=4)
        if FLAGS.binary:
            actQ = DQuantize
            weightQ = XQuantize
            bits = FLAGS.bits
            use_act = False
            use_bn = False
            use_maxpool = True
        else:
            actQ = None
            weightQ = None
            bits = None
            use_act = True
            use_bn = True
            use_maxpool = True
        config = Config(actQ=actQ, weightQ=weightQ, bits=bits, use_act=use_act, use_bn=use_bn, use_maxpool=use_maxpool)
        with config:
            model = get_model(FLAGS.model)

        global_step = tf.compat.v1.train.get_or_create_global_step()
        learning_rate = tf.compat.v1.train.cosine_decay_restarts(FLAGS.learning_rate, global_step, 1000)
        tf.compat.v1.summary.scalar('learning_rate', learning_rate)
        optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate=learning_rate, momentum=FLAGS.momentum, use_nesterov=False)

        loss_object = tf.keras.losses.SparseCategoricalCrossentropy(reduction=tf.keras.losses.Reduction.NONE)

        def loss_fn(labels, predictions):
            per_example_loss = loss_object(labels, predictions)
            return tf.nn.compute_average_loss(per_example_loss, global_batch_size=FLAGS.batch_size)

        # Get proper mode for batchnorm and dropout, must be python bool.
        training = (mode == tf.estimator.ModeKeys.TRAIN)
        predictions = model(features, training=training)
        total_loss = loss_fn(labels, predictions)
        reg_losses = model.get_losses_for(None) + model.get_losses_for(features)
        if reg_losses:
            total_loss += tf.math.add_n(reg_losses)

        # Compute training metrics.
        accuracy = tf.compat.v1.metrics.accuracy(labels=labels, predictions=tf.math.argmax(predictions, axis=-1), name='acc_op')
        accuracy_top_5 = tf.compat.v1.metrics.mean(tf.math.in_top_k(predictions=predictions, targets=tf.reshape(labels, [-1]), k=5, name='top_5_op'))
        metrics = {'accuracy': accuracy, 'accuracy_top_5': accuracy_top_5}

        # Now define optimizer function.
        update_ops = model.get_updates_for(features) + model.get_updates_for(None)
        with tf.control_dependencies(update_ops):
            train_op = optimizer.minimize(total_loss, var_list=model.trainable_variables, global_step=global_step)

        # Keep track of training accuracy.
        if mode == tf.estimator.ModeKeys.TRAIN:
            tf.compat.v1.summary.scalar('train_accuracy', accuracy[1])
            tf.compat.v1.summary.scalar('train_accuracy_top_5', accuracy_top_5[1])

        return tf.estimator.EstimatorSpec(mode=mode, loss=total_loss, train_op=train_op, eval_metric_ops=metrics)

    # Now we're ready to configure our estimator and train.
    # Determine proper name for this model.
    full_model_path = os.path.join(FLAGS.model_dir, "%s_%s" % (FLAGS.model, FLAGS.experiment))

    # Figure out which GPUS to run on.
    if num_gpus > 1:
        strategy = tf.distribute.MirroredStrategy()
    else:
        strategy = None

    session_config = tf.compat.v1.ConfigProto(
        inter_op_parallelism_threads=op_threads,
        intra_op_parallelism_threads=op_threads,
        allow_soft_placement=True,
    )
    session_config.gpu_options.allow_growth = True

    run_config = tf.estimator.RunConfig(
        save_summary_steps=500,
        log_step_count_steps=500,
        save_checkpoints_secs=3600,
        train_distribute=strategy,
        session_config=session_config)

    classifier = tf.estimator.Estimator(model_fn=model_fn, model_dir=full_model_path, config=run_config)

    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=None)
    eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn)
    tf.estimator.train_and_evaluate(classifier, train_spec, eval_spec)

if __name__ == '__main__':
    app.run(main)
7,430
36.720812
109
py
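Both ImageNet training scripts in this repo pair SparseCategoricalCrossentropy with Reduction.NONE and then call tf.nn.compute_average_loss, so the per-example losses are averaged against the global batch size rather than the per-replica batch when a distribution strategy is in use. A self-contained sketch of that pattern (the batch size and tensors are illustrative, not from the scripts):

# Minimal illustration of the per-example-loss / compute_average_loss pattern.
import tensorflow as tf

loss_object = tf.keras.losses.SparseCategoricalCrossentropy(reduction=tf.keras.losses.Reduction.NONE)

def loss_fn(labels, predictions, global_batch_size):
    per_example_loss = loss_object(labels, predictions)          # shape: (batch,)
    return tf.nn.compute_average_loss(per_example_loss, global_batch_size=global_batch_size)

labels = tf.constant([1, 3])
predictions = tf.nn.softmax(tf.random.uniform((2, 10)))          # fake class probabilities
print(loss_fn(labels, predictions, global_batch_size=2))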
Riptide
Riptide-master/scripts/autotune_model_rpi.py
import os
import numpy as np
import tensorflow as tf
import argparse
import tvm
from tvm import autotvm
from tvm import relay
from tvm.relay import testing
import tvm.relay.testing.tf as tf_testing
from tvm.autotvm.tuner import XGBTuner, GATuner, GridSearchTuner
from tvm.contrib.util import tempdir
from tvm.contrib import util
import tvm.contrib.graph_runtime as runtime
import riptide.models
from riptide.get_models import get_model
from riptide.binary.binary_layers import Config, DQuantize, XQuantize

os.environ["CUDA_VISIBLE_DEVICES"] = ''

device_key = 'rpi3b'
target = tvm.target.arm_cpu("rasp3b")
target_host = 'llvm -device=arm_cpu -target=arm-linux-gnueabihf -mattr=+neon'

parser = argparse.ArgumentParser()
parser.add_argument('--activation_bits', type=int, default=1, help='number of activation bits', required=False)
parser.add_argument('--model', type=str, choices=['vggnet', 'vgg11', 'resnet18', 'alexnet_normal', 'alexnet', 'darknet', 'squeezenet', 'squeezenet_normal', 'vggnet_normal'], help='neural network model', required=True)
parser.add_argument('--trials', type=int, default=50, help='number of tuning trials', required=False)
parser.add_argument('--tuner', type=str, default='ga', choices=['xgb', 'ga', 'gridsearch'], help='autotvm tuning algorithm.', required=False)
parser.add_argument('--log_file', type=str, default='log.log', help='logfile to store tuning results', required=False)
parser.add_argument('--unipolar', action='store_false', help='Whether to use bipolar or unipolar quantization')
args = parser.parse_args()

model = args.model
activation_bits = args.activation_bits
trials = args.trials
tuner = args.tuner
log_file = args.log_file

config = Config(actQ=DQuantize, weightQ=XQuantize, bits=activation_bits, use_act=False, use_bn=False, use_maxpool=True, bipolar=args.unipolar)
with config:
    model = get_model(model)
    #model = riptide.models.vggnet_normal.vggnet()

# Init model shapes.
test_input = tf.keras.Input(shape=[224, 224, 3], batch_size=1, dtype='float32')
output = model(test_input)

# Parse model to relay
with target:
    net, params = relay.frontend.from_keras(model, shape={'input_1': [1, 224, 224, 3]}, layout='NHWC')
net = net['main']

num_threads = 4
os.environ["TVM_NUM_THREADS"] = str(num_threads)

# Set up tuning options
tuning_option = {
    'log_filename': log_file,
    'tuner': tuner,
    'early_stopping': None,
    'n_trial': trials,
    'measure_option': autotvm.measure_option(
        builder=autotvm.LocalBuilder(build_func='default'),
        runner=autotvm.RPCRunner(device_key, host='fleet.cs.washington.edu', port=9190, number=1, timeout=20)),
}

def tune_kernels(tasks, measure_option, tuner=tuner, n_trial=trials, early_stopping=None, log_filename=log_file):
    for i, tsk in enumerate(tasks):
        prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
        # Converting conv2d tasks to conv2d_NCHWc tasks. Do we actually want this?
        op_name = tsk.workload[0]
        #input_shape = tsk.workload[1][0:-1]
        #kernel_shape = tsk.workload[2][0:-1]
        #input_channels = input_shape[1]
        # Only can convert to NCHWc if input channels is divisible by 8.
        #convertible = input_channels % 8 == 0
        func_create = tsk.name
        #if op_name == 'conv2d':
        #    func_create = 'topi_x86_conv2d_NCHWc'
        #elif op_name == 'depthwise_conv2d_nchw':
        #    func_create = 'topi_x86_depthwise_conv2d_NCHWc_from_nchw'
        print(func_create)
        task = autotvm.task.create(func_create, args=tsk.args, target=target, template_key='direct')
        task.workload = tsk.workload
        # Create tuner.
        if tuner == 'xgb' or tuner == 'xbg-rank':
            tuner_obj = XGBTuner(task, loss_type='rank')
        elif tuner == 'ga':
            tuner_obj = GATuner(task, pop_size=50)
        elif tuner == 'gridsearch':
            tuner_obj = GridSearchTuner(task)
        else:
            raise ValueError("Invalid tuner: " + tuner)
        # Do tuning.
        n_trial = min(n_trial, len(task.config_space))
        tuner_obj.tune(
            n_trial=n_trial,
            early_stopping=early_stopping,
            measure_option=measure_option,
            callbacks=[
                autotvm.callback.progress_bar(n_trial, prefix=prefix),
                autotvm.callback.log_to_file(log_filename)
            ])

# Launch jobs and evaluate performance.
def tune_and_evaluate(tuning_opt):
    print("Extract tasks...")
    global net, params
    tasks = autotvm.task.extract_from_program(
        net, target=target, params=params,
        ops=(relay.op.nn.conv2d, relay.op.nn.bitserial_conv2d, relay.op.nn.bitserial_dense, relay.op.nn.nn.dense))

    # Run tuning tasks.
    print("Tuning...")
    tune_kernels(tasks, **tuning_opt)

    # compile kernels with history best records.
    with autotvm.apply_history_best(log_file):
        print("Compile...")
        with relay.build_config(opt_level=3):
            graph, lib, params = relay.build_module.build(net, target=target, params=params)

        batch_size = 1
        num_class = 1000
        image_shape = (224, 224, 3)
        data_shape = (batch_size, ) + image_shape

        tmp = util.tempdir()
        lib_fname = tmp.relpath('net.tar')
        lib.export_library(lib_fname)

        # Upload module to device
        print("Upload...")
        remote = autotvm.measure.request_remote(device_key, 'fleet.cs.washington.edu', 9190, timeout=10000)

        # upload the library to remote device and load it
        remote.upload(lib_fname)
        rlib = remote.load_module('net.tar')

        # create the remote runtime module
        ctx = remote.cpu(0)
        module = runtime.create(graph, rlib, ctx)
        # set parameter (upload params to the remote device. This may take a while)
        module.set_input(**params)
        module.set_input('input_1', tvm.nd.array(np.random.uniform(size=image_shape).astype('float32')))
        module.run()

        # Evaluate
        print("Evaluate inference time cost...")
        ftimer = module.module.time_evaluator("run", ctx, number=10, repeat=1)
        prof_res = np.array(ftimer().results) * 1000  # Convert to milliseconds
        print("Mean inference time (std dev): %.2f ms (%.2f ms)" % (np.mean(prof_res), np.std(prof_res)))

tune_and_evaluate(tuning_option)
6,799
28.694323
136
py
Riptide
Riptide-master/scripts/measure_rpi.py
import os
import numpy as np
import tensorflow as tf
import argparse
import tvm
from tvm import autotvm
from tvm import relay
from tvm.relay import testing
import tvm.relay.testing.tf as tf_testing
from tvm.autotvm.tuner import XGBTuner, GATuner, GridSearchTuner
from tvm.contrib.util import tempdir
from tvm.contrib import util
#import tvm.contrib.graph_runtime as runtime
import tvm.contrib.debugger.debug_runtime as runtime
import riptide.models
from riptide.get_models import get_model
from riptide.binary.binary_layers import Config, DQuantize, XQuantize

os.environ["CUDA_VISIBLE_DEVICES"] = ''

device_key = 'rpi3b'
target = tvm.target.arm_cpu("rasp3b")
target_host = 'llvm -device=arm_cpu -target=arm-linux-gnueabihf -mattr=+neon'

parser = argparse.ArgumentParser()
parser.add_argument('--activation_bits', type=int, default=1, help='number of activation bits', required=False)
parser.add_argument('--model', type=str, choices=['vggnet', 'vgg11', 'resnet18', 'alexnet_normal', 'alexnet', 'darknet', 'squeezenet', 'squeezenet_normal', 'squeezenet_batchnorm', 'vggnet_normal'], help='neural network model', required=True)
parser.add_argument('--log_file', type=str, default='log.log', help='logfile to store tuning results', required=False)
parser.add_argument('--unipolar', action='store_false', help='Whether to use bipolar or unipolar quantization')
args = parser.parse_args()

model = args.model
activation_bits = args.activation_bits
log_file = args.log_file

config = Config(actQ=DQuantize, weightQ=XQuantize, bits=activation_bits, use_act=False, use_bn=False, use_maxpool=True, bipolar=args.unipolar)
with config:
    model = get_model(model)
    #model = riptide.models.vggnet_normal.vggnet()

# Init model shapes.
test_input = tf.keras.Input(shape=[224, 224, 3], batch_size=1, dtype='float32')
output = model(test_input)

# Parse model to relay
with target:
    net, params = relay.frontend.from_keras(model, shape={'input_1': [1, 224, 224, 3]}, layout='NHWC')

num_threads = 4
os.environ["TVM_NUM_THREADS"] = str(num_threads)

with autotvm.apply_history_best(log_file):
    print("Compile...")
    with relay.build_config(opt_level=3):
        graph, lib, params = relay.build_module.build(net, target=target, params=params)

batch_size = 1
num_class = 1000
image_shape = (224, 224, 3)
data_shape = (batch_size, ) + image_shape

tmp = util.tempdir()
lib_fname = tmp.relpath('net.tar')
lib.export_library(lib_fname)

# Upload module to device
print("Upload...")
#remote = autotvm.measure.request_remote(
#    device_key, 'fleet.cs.washington.edu', 9190, timeout=10000)
remote = tvm.rpc.connect('jwfromm-rpi', 9090)

# upload the library to remote device and load it
remote.upload(lib_fname)
rlib = remote.load_module('net.tar')

# create the remote runtime module
ctx = remote.cpu(0)
module = runtime.create(graph, rlib, ctx)
# set parameter (upload params to the remote device. This may take a while)
module.set_input(**params)
module.set_input('input_1', tvm.nd.array(np.random.uniform(size=image_shape).astype('float32')))
module.run()

# Evaluate
print("Evaluate inference time cost...")
ftimer = module.module.time_evaluator("run", ctx, number=1, repeat=1)
prof_res = np.array(ftimer().results) * 1000  # Convert to milliseconds
print("Mean inference time (std dev): %.2f ms (%.2f ms)" % (np.mean(prof_res), np.std(prof_res)))
3,782
29.756098
160
py
Riptide
Riptide-master/scripts/anneal/train_imagenet.py
import os
import multiprocessing
import tensorflow as tf
from functools import partial
from riptide.utils.datasets import imagerecord_dataset
from riptide.utils.thread_helper import setup_gpu_threadpool
from riptide.anneal.anneal_config import Config
from riptide.anneal.models import get_model, get_optimizer
from riptide.utils.preprocessing.inception_preprocessing import preprocess_image
from absl import app
from absl import flags
from absl import logging

FLAGS = flags.FLAGS
flags.DEFINE_string('model_dir', '/data/jwfromm/anneal_models', 'Directory to save models in.')
flags.DEFINE_string('model', '', 'Name of model to train, must be set.')
flags.DEFINE_string('experiment', '', 'Suffix to add to model name, should describe purpose of run.')
flags.DEFINE_string('data_path', '/data/imagenet/tfrecords', 'Directory containing tfrecords to load.')
flags.DEFINE_string('gpus', '', 'Comma separated list of GPUS to run on.')
flags.DEFINE_integer('epochs', 100, 'Number of epochs to train.')
flags.DEFINE_integer('batch_size', 64, 'Size of each minibatch.')
flags.DEFINE_integer('image_size', 224, 'Height and Width of processed images.')
flags.DEFINE_float('learning_rate', .0128, 'Starting learning rate.')
flags.DEFINE_float('momentum', 0.9, 'Momentum used for optimizer.')
flags.DEFINE_boolean('quantize', 0, 'Use a quantized network.')
flags.DEFINE_float('a_bits', 2.0, 'Number of activation bits to use for binary model.')
flags.DEFINE_float('w_bits', 2.0, 'Number of activation bits to use for binary model.')

def main(argv):
    # Set visible GPUS appropriately.
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpus
    # Get thread configuration.
    op_threads, num_workers = setup_gpu_threadpool(len(FLAGS.gpus.split(',')))
    num_gpus = len(FLAGS.gpus.split(','))

    # Set up the data input functions.
    train_preprocess = partial(preprocess_image, height=FLAGS.image_size, width=FLAGS.image_size, is_training=True)
    eval_preprocess = partial(preprocess_image, height=FLAGS.image_size, width=FLAGS.image_size, is_training=False)

    def train_input_fn():
        ds = imagerecord_dataset(FLAGS.data_path, FLAGS.batch_size, is_training=True, preprocess=train_preprocess, num_workers=num_workers)
        return ds.repeat(FLAGS.epochs)

    def eval_input_fn():
        ds = imagerecord_dataset(FLAGS.data_path, FLAGS.batch_size, is_training=False, preprocess=eval_preprocess, num_workers=num_workers)
        return ds.repeat(1)

    # Set up estimator model function.
    def model_fn(features, labels, mode):
        # Generate summary for input images.
        tf.compat.v1.summary.image('images', features, max_outputs=4)
        if FLAGS.quantize:
            a_bits = FLAGS.a_bits
            w_bits = FLAGS.w_bits
            quantize = True
            fixed = False  # Do standard dorefa quantization
        else:
            a_bits = None
            w_bits = None
            quantize = False
            fixed = False
        config = Config(quantize=quantize, a_bits=a_bits, w_bits=w_bits, fixed=fixed)
        with config:
            model = get_model(FLAGS.model)

        global_step = tf.compat.v1.train.get_or_create_global_step()
        optimizer, learning_rate = get_optimizer(FLAGS.model, global_step, FLAGS.batch_size, num_gpus)

        loss_object = tf.keras.losses.SparseCategoricalCrossentropy(reduction=tf.keras.losses.Reduction.NONE)

        def loss_fn(labels, predictions):
            per_example_loss = loss_object(labels, predictions)
            return tf.nn.compute_average_loss(per_example_loss, global_batch_size=FLAGS.batch_size)

        # Track learning rate.
        tf.compat.v1.summary.scalar('learning_rate', learning_rate)

        # Get proper mode for batchnorm and dropout, must be python bool.
        training = (mode == tf.estimator.ModeKeys.TRAIN)
        predictions = model(features, training=training)
        total_loss = loss_fn(labels, predictions)
        reg_losses = model.get_losses_for(None) + model.get_losses_for(features)
        if reg_losses:
            total_loss += tf.math.add_n(reg_losses)

        # Compute training metrics.
        accuracy = tf.compat.v1.metrics.accuracy(labels=labels, predictions=tf.math.argmax(predictions, axis=-1), name='acc_op')
        accuracy_top_5 = tf.compat.v1.metrics.mean(tf.math.in_top_k(predictions=predictions, targets=tf.reshape(labels, [-1]), k=5, name='top_5_op'))
        metrics = {'accuracy': accuracy, 'accuracy_top_5': accuracy_top_5}

        update_ops = model.get_updates_for(features) + model.get_updates_for(None)
        with tf.control_dependencies(update_ops):
            train_op = optimizer.minimize(total_loss, var_list=model.trainable_variables, global_step=global_step)

        # Keep track of training accuracy.
        if mode == tf.estimator.ModeKeys.TRAIN:
            tf.compat.v1.summary.scalar('train_accuracy', accuracy[1])
            tf.compat.v1.summary.scalar('train_accuracy_top_5', accuracy_top_5[1])

        return tf.estimator.EstimatorSpec(mode=mode, loss=total_loss, train_op=train_op, eval_metric_ops=metrics)

    # Now we're ready to configure our estimator and train.
    # Determine proper name for this model.
    full_model_path = os.path.join(FLAGS.model_dir, "%s_%s" % (FLAGS.model, FLAGS.experiment))

    # Figure out which GPUS to run on.
    if num_gpus > 1:
        strategy = tf.distribute.MirroredStrategy()
    else:
        strategy = None

    session_config = tf.compat.v1.ConfigProto(
        inter_op_parallelism_threads=op_threads,
        intra_op_parallelism_threads=op_threads,
        allow_soft_placement=True)
    session_config.gpu_options.allow_growth = True

    run_config = tf.estimator.RunConfig(
        save_summary_steps=500,
        log_step_count_steps=500,
        save_checkpoints_secs=3600,
        train_distribute=strategy,
        session_config=session_config)

    classifier = tf.estimator.Estimator(model_fn=model_fn, model_dir=full_model_path, config=run_config)

    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=None)
    eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn)
    tf.estimator.train_and_evaluate(classifier, train_spec, eval_spec)

if __name__ == '__main__':
    app.run(main)
6,920
37.45
109
py
Riptide
Riptide-master/riptide/binary/binary_layers.py
import tensorflow as tf
import tensorflow.keras as keras
from .binary_funcs import *
from functools import partial
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.framework import common_shapes
from tensorflow.python.keras.engine.base_layer import InputSpec
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.training import distribution_strategy_context
from tensorflow.python.util.tf_export import tf_export
from tensorflow.keras.layers import GlobalAveragePooling2D, Flatten, Activation, PReLU, Input, Concatenate

"""Quantization scope, defines the modification of operator"""

class Config(object):
    """Configuration scope of current mode.

    This is used to easily switch between different model structure variants
    by simply calling into these functions.

    Parameters
    ----------
    actQ : function
        Activation quantization
    weightQ : function: name->function
        Maps name to quantize function.
    bits : Tensor
        When using HWGQ binarization, these are the possible values that can be
        used in approximation. For other binarization schemes, this should be
        the number of bits to use.
    use_bn: bool
        Whether to apply batch normalization at the end of the layer or not.
    use_qadd: bool
        If true, do quantization before addition in Qadd layers.

    Example
    -------
    import qnn
    with qnn.Config(actQ=qnn.quantize(bits=8, scale=8, signed=True), weightQ=qnn.identity, use_bn=True):
        net = qnn.get_model(model_name, **kwargs)
    """
    current = None

    def __init__(self, actQ=None, weightQ=None, bits=None, use_bn=True, use_maxpool=True, use_act=True, bipolar=False, shiftnorm_scale=1.0, use_qadd=False):
        if actQ is not None:
            actQ = partial(actQ, bipolar=bipolar)
        self.actQ = actQ if actQ else lambda x: x
        self.weightQ = weightQ if weightQ else lambda x: x
        self.bits = bits
        self.use_bn = use_bn
        self.use_act = use_act
        self.bipolar = bipolar
        self.shiftnorm_scale = shiftnorm_scale
        self.use_qadd = use_qadd
        self.use_maxpool = use_maxpool

    def __enter__(self):
        self._old_manager = Config.current
        Config.current = self
        return self

    def __exit__(self, ptype, value, trace):
        Config.current = self._old_manager

class BinaryConv2D(keras.layers.Conv2D):
    def __init__(self, *args, **kwargs):
        super(BinaryConv2D, self).__init__(*args, **kwargs)
        self.scope = Config.current
        self.actQ = self.scope.actQ
        self.weightQ = self.scope.weightQ
        self.bits = self.scope.bits
        self.use_act = self.scope.use_act
        if self.use_act and self.activation is None:
            self.activation = Activation('relu')
        self.bipolar = self.scope.bipolar

    def call(self, inputs):
        with tf.name_scope("actQ"):
            tf.compat.v1.summary.histogram('prebinary_activations', inputs)
            if self.bits is not None:
                inputs = self.actQ(inputs, float(self.bits))
            else:
                inputs = self.actQ(inputs)
            tf.compat.v1.summary.histogram('binary_activations', inputs)
        with tf.name_scope("weightQ"):
            kernel = self.weightQ(self.kernel)
            tf.compat.v1.summary.histogram('weights', self.kernel)
            tf.compat.v1.summary.histogram('binary_weights', kernel)
        # If bipolar quantization is used, pad with -1 instead of 0.
        if self.bipolar:
            inputs = inputs + 1.0
        outputs = self._convolution_op(inputs, kernel)
        if self.bipolar:
            readjust_val = -1.0 * tf.reduce_sum(kernel, axis=[0, 1, 2])
            outputs = outputs + readjust_val
        if self.use_bias:
            if self.data_format == 'channels_first':
                outputs = tf.nn.bias_add(outputs, self.bias, data_format='NCHW')
            else:
                outputs = tf.nn.bias_add(outputs, self.bias, data_format='NHWC')
        if self.use_act and self.activation is not None:
            outputs = self.activation(outputs)
        return outputs

class BinaryDense(keras.layers.Dense):
    def __init__(self, *args, **kwargs):
        super(BinaryDense, self).__init__(*args, **kwargs)
        self.scope = Config.current
        self.actQ = self.scope.actQ
        self.weightQ = self.scope.weightQ
        self.bits = self.scope.bits
        self.use_act = self.scope.use_act
        if self.use_act and self.activation is None:
            self.activation = Activation('relu')

    def call(self, inputs):
        inputs = tf.convert_to_tensor(inputs, dtype=self.dtype)
        with tf.name_scope("actQ"):
            tf.compat.v1.summary.histogram('prebinary_activations', inputs)
            if self.bits is not None:
                inputs = self.actQ(inputs, float(self.bits))
            else:
                inputs = self.actQ(inputs)
            tf.compat.v1.summary.histogram('binary_activations', inputs)
        with tf.name_scope("weightQ"):
            kernel = self.weightQ(self.kernel)
            tf.compat.v1.summary.histogram('weights', self.kernel)
            tf.compat.v1.summary.histogram('binary_weights', kernel)
        rank = common_shapes.rank(inputs)
        if rank > 2:
            # Broadcasting is required for the inputs.
            outputs = tf.tensordot(inputs, kernel, [[rank - 1], [0]])
            # Reshape the output back to the original ndim of the input.
            if not context.executing_eagerly():
                shape = inputs.get_shape().as_list()
                output_shape = shape[:-1] + [self.units]
                outputs.set_shape(output_shape)
        else:
            outputs = tf.matmul(inputs, kernel)
        if self.use_bias:
            outputs = tf.nn.bias_add(outputs, self.bias)
        if self.use_act and self.activation is not None:
            outputs = self.activation(outputs)  # pylint: disable=not-callable
        return outputs

class Scalu(keras.layers.Layer):
    def __init__(self, scale=0.001):
        super(Scalu, self).__init__()
        self.scale = scale

    def build(self, input_shape):
        self.scale = self.add_variable('scale', shape=[1], initializer=tf.keras.initializers.Constant(value=self.scale))

    def call(self, input):
        return input * self.scale

class QAdd(keras.layers.Layer):
    def __init__(self):
        super(QAdd, self).__init__()
        self.scope = Config.current
        self.bits = self.scope.bits
        self.act = self.scope.actQ
        self.use_q = self.scope.use_qadd

    def build(self, input_shape):
        self.scale = self.add_variable('scale', shape=[1], initializer='ones')

    def call(self, inputs):
        x = inputs[0]
        y = inputs[1]
        if self.use_q:
            x = self.act(x, self.bits)
            y = self.act(y, self.bits)
            with tf.name_scope("AP2"):
                approx_scale = AP2(self.scale)
            output = approx_scale * (x + y)
        else:
            output = x + y
        return output

class EnterInteger(keras.layers.Layer):
    def __init__(self, scale):
        super(EnterInteger, self).__init__()
        self.scale = scale
        self.scope = Config.current
        self.bits = self.scope.bits
        self.quantize = self.scope.bits != None

    def call(self, inputs):
        return self.scale * inputs

class ExitInteger(keras.layers.Layer):
    def call(self, inputs):
        return inputs

class SpecialBatchNormalization(keras.layers.BatchNormalization):
    def __init__(self, *args, **kwargs):
        super(SpecialBatchNormalization, self).__init__(**kwargs)

    def call(self, inputs, training=None):
        return super(SpecialBatchNormalization, self).call(inputs, training)

class ResidualConnect(Layer):
    def __init__(self, *args, **kwargs):
        super(ResidualConnect, self).__init__()

    def call(self, inputs):
        return tf.concat(inputs, axis=-1)

def BatchNormalization(*args, **kwargs):
    scope = Config.current
    if scope.use_bn:
        return SpecialBatchNormalization(*args, **kwargs)
    else:
        return ShiftNormalization(*args, **kwargs)

class UnfusedBatchNorm(keras.layers.BatchNormalization):
    def __init__(self, *args, **kwargs):
        super(UnfusedBatchNorm, self).__init__(*args, **kwargs)

def MaxPool2D(*args, **kwargs):
    scope = Config.current
    if scope.use_maxpool:
        return keras.layers.MaxPool2D(*args, **kwargs)
    else:
        return lambda x: x

class ShiftNormalization(Layer):
    """Shift normalization layer

    Normalize the activations of the previous layer at each batch, i.e. applies a
    transformation that maintains the mean activation close to 0 and the activation
    standard deviation close to 1.

    Arguments:
        axis: Integer, the axis that should be normalized (typically the features axis).
            For instance, after a `Conv2D` layer with `data_format="channels_first"`,
            set `axis=1` in `BatchNormalization`.
        binary_dense: Set true when using after a binary dense layer. Need some special
            handling for batchnorm for binary dense layers to prevent small variance.
        momentum: Momentum for the moving average.
        epsilon: Small float added to variance to avoid dividing by zero.
        center: If True, add offset of `beta` to normalized tensor. If False, `beta` is ignored.
        scale: If True, multiply by `gamma`. If False, `gamma` is not used. When the next
            layer is linear (also e.g. `nn.relu`), this can be disabled since the scaling
            will be done by the next layer.
        beta_initializer: Initializer for the beta weight.
        gamma_initializer: Initializer for the gamma weight.
        moving_mean_initializer: Initializer for the moving mean.
        moving_variance_initializer: Initializer for the moving variance.
        beta_regularizer: Optional regularizer for the beta weight.
        gamma_regularizer: Optional regularizer for the gamma weight.
        beta_constraint: Optional constraint for the beta weight.
        gamma_constraint: Optional constraint for the gamma weight.
        renorm: Whether to use Batch Renormalization (https://arxiv.org/abs/1702.03275).
            This adds extra variables during training. The inference is the same for
            either value of this parameter.
        renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to scalar
            `Tensors` used to clip the renorm correction. The correction `(r, d)` is used
            as `corrected_value = normalized_value * r + d`, with `r` clipped to
            [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin, dmax are set to
            inf, 0, inf, respectively.
        renorm_momentum: Momentum used to update the moving means and standard deviations
            with renorm. Unlike `momentum`, this affects training and should be neither
            too small (which would add noise) nor too large (which would give stale
            estimates). Note that `momentum` is still applied to get the means and
            variances for inference.
        trainable: Boolean, if `True` also add variables to the graph collection
            `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).

    Input shape:
        Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not
        include the samples axis) when using this layer as the first layer in a model.

    Output shape:
        Same shape as input.

    References:
        - [Batch Normalization: Accelerating Deep Network Training by Reducing Internal
          Covariate Shift](https://arxiv.org/abs/1502.03167)
    """

    def __init__(self, previous_layer, axis=-1, momentum=0.99, epsilon=1e-3, center=False,
                 scale=False, beta_initializer='zeros', gamma_initializer='ones',
                 moving_mean_initializer='zeros', moving_variance_initializer='ones',
                 beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
                 gamma_constraint=None, renorm=False, renorm_clipping=None,
                 renorm_momentum=0.99, trainable=True, name=None, **kwargs):
        super(ShiftNormalization, self).__init__(name=name, trainable=trainable, **kwargs)
        self.scope = Config.current
        self.bits = self.scope.bits
        self.previous_layer = previous_layer
        if isinstance(axis, list):
            self.axis = axis[:]
        else:
            self.axis = axis
        self.binary_dense = isinstance(previous_layer, BinaryDense)
        self.momentum = momentum
        self.epsilon = epsilon
        self.center = center
        self.scale = scale
        self.extra_scale = self.scope.shiftnorm_scale
        self.beta_initializer = initializers.get(beta_initializer)
        self.gamma_initializer = initializers.get(gamma_initializer)
        self.moving_mean_initializer = initializers.get(moving_mean_initializer)
        self.moving_variance_initializer = initializers.get(moving_variance_initializer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_constraint = constraints.get(beta_constraint)
        self.gamma_constraint = constraints.get(gamma_constraint)
        self.renorm = renorm
        self.supports_masking = True
        self._bessels_correction_test_only = True
        if renorm:
            renorm_clipping = renorm_clipping or {}
            keys = ['rmax', 'rmin', 'dmax']
            if set(renorm_clipping) - set(keys):
                raise ValueError('renorm_clipping %s contains keys not in %s' % (renorm_clipping, keys))
            self.renorm_clipping = renorm_clipping
            self.renorm_momentum = renorm_momentum

    def build(self, input_shape):
        input_shape = tf.TensorShape(input_shape)
        if not input_shape.ndims:
            raise ValueError('Input has undefined rank:', input_shape)
        ndims = len(input_shape)
        # Convert axis to list and resolve negatives
        if isinstance(self.axis, int):
            self.axis = [self.axis]
        if not isinstance(self.axis, list):
            raise TypeError('axis must be int or list, type given: %s' % type(self.axis))
        for idx, x in enumerate(self.axis):
            if x < 0:
                self.axis[idx] = ndims + x
        # Validate axes
        for x in self.axis:
            if x < 0 or x >= ndims:
                raise ValueError('Invalid axis: %d' % x)
        if len(self.axis) != len(set(self.axis)):
            raise ValueError('Duplicate axis: %s' % self.axis)
        # Raise parameters of fp16 batch norm to fp32
        if self.dtype == tf.float16 or self.dtype == tf.bfloat16:
            param_dtype = tf.float32
        else:
            param_dtype = self.dtype or tf.float32
        axis_to_dim = {x: input_shape[x] for x in self.axis}
        for x in axis_to_dim:
            if axis_to_dim[x] is None:
                raise ValueError('Input has undefined `axis` dimension.
Input shape: ', input_shape) self.input_spec = InputSpec(ndim=ndims, axes=axis_to_dim) if self.binary_dense: param_shape = [1] elif len(axis_to_dim) == 1: # Single axis batch norm (most common/default use-case) param_shape = (list(axis_to_dim.values())[0], ) else: # Parameter shape is the original shape but with 1 in all non-axis dims param_shape = [ axis_to_dim[i] if i in axis_to_dim else 1 for i in range(ndims) ] if self.scale: self.gamma = self.add_weight( name='gamma', shape=param_shape, dtype=param_dtype, initializer=self.gamma_initializer, regularizer=self.gamma_regularizer, constraint=self.gamma_constraint, trainable=True) else: self.gamma = None if self.center: self.beta = self.add_weight( name='beta', shape=param_shape, dtype=param_dtype, initializer=self.beta_initializer, regularizer=self.beta_regularizer, constraint=self.beta_constraint, trainable=True) else: self.beta = None try: # Disable variable partitioning when creating the moving mean and variance if hasattr(self, '_scope') and self._scope: partitioner = self._scope.partitioner self._scope.set_partitioner(None) else: partitioner = None self.moving_mean = self.add_weight( name='moving_mean', shape=param_shape, dtype=param_dtype, initializer=self.moving_mean_initializer, synchronization=tf.VariableSynchronization.ON_READ, trainable=False, aggregation=tf.VariableAggregation.MEAN) self.moving_variance = self.add_weight( name='moving_variance', shape=param_shape, dtype=param_dtype, initializer=self.moving_variance_initializer, synchronization=tf.VariableSynchronization.ON_READ, trainable=False, aggregation=tf.VariableAggregation.MEAN) if self.renorm: # Create variables to maintain the moving mean and standard deviation. # These are used in training and thus are different from the moving # averages above. The renorm variables are colocated with moving_mean # and moving_variance. # NOTE: below, the outer `with device` block causes the current device # stack to be cleared. The nested ones use a `lambda` to set the desired # device and ignore any devices that may be set by the custom getter. def _renorm_variable(name, shape): var = self.add_weight( name=name, shape=shape, dtype=param_dtype, initializer=tf.zeros_initializer(), synchronization=tf.VariableSynchronization.ON_READ, trainable=False, aggregation=tf.VariableAggregation.MEAN) return var with distribution_strategy_context.get_distribution_strategy( ).colocate_vars_with(self.moving_mean): self.renorm_mean = _renorm_variable( 'renorm_mean', param_shape) self.renorm_mean_weight = _renorm_variable( 'renorm_mean_weight', ()) # We initialize renorm_stddev to 0, and maintain the (0-initialized) # renorm_stddev_weight. This allows us to (1) mix the average # stddev with the minibatch stddev early in training, and (2) compute # the unbiased average stddev by dividing renorm_stddev by the weight. 
with distribution_strategy_context.get_distribution_strategy( ).colocate_vars_with(self.moving_variance): self.renorm_stddev = _renorm_variable( 'renorm_stddev', param_shape) self.renorm_stddev_weight = _renorm_variable( 'renorm_stddev_weight', ()) finally: if partitioner: self._scope.set_partitioner(partitioner) self.built = True def _assign_moving_average(self, variable, value, momentum): with tf.compat.v1.name_scope(None, 'AssignMovingAvg', [variable, value, momentum]) as scope: with tf.compat.v1.colocate_with(variable): decay = tf.convert_to_tensor(1.0 - momentum, name='decay') if decay.dtype != variable.dtype.base_dtype: decay = tf.cast(decay, variable.dtype.base_dtype) update_delta = ( variable - tf.cast(value, variable.dtype)) * decay return tf.compat.v1.assign_sub(variable, update_delta, name=scope) def _renorm_correction_and_moments(self, mean, variance, training): """Returns the correction and update values for renorm.""" stddev = tf.sqrt(variance + self.epsilon) # Compute the average mean and standard deviation, as if they were # initialized with this batch's moments. mixed_renorm_mean = (self.renorm_mean + (1. - self.renorm_mean_weight) * mean) mixed_renorm_stddev = (self.renorm_stddev + (1. - self.renorm_stddev_weight) * stddev) # Compute the corrections for batch renorm. r = stddev / mixed_renorm_stddev d = (mean - mixed_renorm_mean) / mixed_renorm_stddev # Ensure the corrections use pre-update moving averages. with tf.control_dependencies([r, d]): mean = tf.identity(mean) stddev = tf.identity(stddev) rmin, rmax, dmax = [ self.renorm_clipping.get(key) for key in ['rmin', 'rmax', 'dmax'] ] if rmin is not None: r = tf.maximum(r, rmin) if rmax is not None: r = tf.minimum(r, rmax) if dmax is not None: d = tf.maximum(d, -dmax) d = tf.minimum(d, dmax) # When not training, use r=1, d=0. r = tf_utils.smart_cond(training, lambda: r, lambda: tf.ones_like(r)) d = tf_utils.smart_cond(training, lambda: d, lambda: tf.zeros_like(d)) def _update_renorm_variable(var, weight, value): """Updates a moving average and weight, returns the unbiased value.""" value = tf.identity(value) def _do_update(): """Updates the var and weight, returns their updated ratio.""" # Update the variables without zero debiasing. The debiasing will be # accomplished by dividing the exponential moving average by the weight. # For example, after a single update, the moving average would be # (1-decay) * value. and the weight will be 1-decay, with their ratio # giving the value. # Make sure the weight is not updated until before r and d computation. with tf.control_dependencies([value]): weight_value = tf.constant(1., dtype=weight.dtype) new_var = self._assign_moving_average(var, value, self.renorm_momentum) new_weight = self._assign_moving_average( weight, weight_value, self.renorm_momentum) # TODO(yuefengz): the updates to var and weighted can not be batched # together if we fetch their updated values here. Consider calculating # new values and delaying the updates. return new_var / new_weight def _fake_update(): return tf.identity(var) return tf_utils.smart_cond(training, _do_update, _fake_update) # TODO(yuefengz): colocate the operations new_mean = _update_renorm_variable(self.renorm_mean, self.renorm_mean_weight, mean) new_stddev = _update_renorm_variable(self.renorm_stddev, self.renorm_stddev_weight, stddev) # Make sqrt(moving_variance + epsilon) = new_stddev. 
new_variance = tf.square(new_stddev) - self.epsilon return (r, d, new_mean, new_variance) def call(self, inputs, training=None): # Extract weights of previous layer to compute proper scale. previous_weights = self.previous_layer.weights[0].value() original_training_value = training if training is None: training = K.learning_phase() in_eager_mode = tf.executing_eagerly() # Compute the axes along which to reduce the mean / variance input_shape = inputs.get_shape() ndims = len(input_shape) # For dense layers, require a full reduction. if self.binary_dense: reduction_axes = [i for i in range(ndims)] # Otherwise, reduce all but the feature axis. else: reduction_axes = [i for i in range(ndims) if i not in self.axis] # Broadcasting only necessary for single-axis batch norm where the axis is # not the last dimension broadcast_shape = [1] * ndims broadcast_shape[self.axis[0]] = input_shape[self.axis[0]] def _broadcast(v): if (v is not None and len(v.get_shape()) != ndims and reduction_axes != list(range(ndims - 1))): return tf.reshape(v, broadcast_shape) return v scale, offset = _broadcast(self.gamma), _broadcast(self.beta) def _compose_transforms(scale, offset, then_scale, then_offset): if then_scale is not None: scale *= then_scale offset *= then_scale if then_offset is not None: offset += then_offset return (scale, offset) # Determine a boolean value for `training`: could be True, False, or None. training_value = tf_utils.constant_value(training) if training_value is not False: # Some of the computations here are not necessary when training==False # but not a constant. However, this makes the code simpler. keep_dims = len(self.axis) > 1 mean, variance = tf.compat.v1.nn.moments( inputs, reduction_axes, keep_dims=keep_dims) # When norming the output of a binary dense layer, # need to make sure shape is maintained. if self.binary_dense: mean = tf.reshape(mean, [1]) variance = tf.reshape(variance, [1]) moving_mean = self.moving_mean moving_variance = self.moving_variance mean = tf_utils.smart_cond(training, lambda: mean, lambda: moving_mean) variance = tf_utils.smart_cond(training, lambda: variance, lambda: moving_variance) new_mean, new_variance = mean, variance if self.renorm: r, d, new_mean, new_variance = self._renorm_correction_and_moments( new_mean, new_variance, training) # When training, the normalized values (say, x) will be transformed as # x * gamma + beta without renorm, and (x * r + d) * gamma + beta # = x * (r * gamma) + (d * gamma + beta) with renorm. 
r = _broadcast(tf.stop_gradient(r, name='renorm_r')) d = _broadcast(tf.stop_gradient(d, name='renorm_d')) scale, offset = _compose_transforms(r, d, scale, offset) def _do_update(var, value): if in_eager_mode and not self.trainable: return return self._assign_moving_average(var, value, self.momentum) mean_update = tf_utils.smart_cond( training, lambda: _do_update(self.moving_mean, new_mean), lambda: self.moving_mean) variance_update = tf_utils.smart_cond( training, lambda: _do_update(self.moving_variance, new_variance), lambda: self.moving_variance) if not tf.executing_eagerly(): self.add_update(mean_update, inputs=True) self.add_update(variance_update, inputs=True) else: mean, variance = self.moving_mean, self.moving_variance mean = tf.cast(mean, inputs.dtype) variance = tf.cast(variance, inputs.dtype) if offset is not None: offset = tf.cast(offset, inputs.dtype) #outputs = nn.batch_normalization(inputs, _broadcast(mean), # _broadcast(variance), offset, scale, # self.epsilon) approximate_std, quantized_means = compute_quantized_shiftnorm( variance, mean, self.epsilon, previous_weights, self.extra_scale, self.bits, rescale=True) outputs = inputs - quantized_means outputs = outputs * approximate_std if scale: outputs = scale * outputs if offset: outputs = outputs + offset # If some components of the shape got lost due to adjustments, fix that. outputs.set_shape(input_shape) if not tf.executing_eagerly() and original_training_value is None: outputs._uses_learning_phase = True # pylint: disable=protected-access return outputs def compute_output_shape(self, input_shape): return input_shape def get_config(self): config = { 'axis': self.axis, 'momentum': self.momentum, 'epsilon': self.epsilon, 'center': self.center, 'scale': self.scale, 'beta_initializer': initializers.serialize(self.beta_initializer), 'gamma_initializer': initializers.serialize(self.gamma_initializer), 'moving_mean_initializer': initializers.serialize(self.moving_mean_initializer), 'moving_variance_initializer': initializers.serialize(self.moving_variance_initializer), 'beta_regularizer': regularizers.serialize(self.beta_regularizer), 'gamma_regularizer': regularizers.serialize(self.gamma_regularizer), 'beta_constraint': constraints.serialize(self.beta_constraint), 'gamma_constraint': constraints.serialize(self.gamma_constraint) } # Only add TensorFlow-specific parameters if they are set, so as to preserve # model compatibility with external Keras. if self.renorm: config['renorm'] = True config['renorm_clipping'] = self.renorm_clipping config['renorm_momentum'] = self.renorm_momentum base_config = super(ShiftNormalization, self).get_config() return dict(list(base_config.items()) + list(config.items())) NormalDense = keras.layers.Dense NormalConv2D = keras.layers.Conv2D NormalMaxPool2D = keras.layers.MaxPool2D NormalBatchNormalization = keras.layers.BatchNormalization
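# --- Illustrative usage sketch (assumed settings, not part of the upstream training scripts) ---
# Layers defined above read their quantizers from whichever Config scope is active
# when they are constructed; DQuantize and XQuantize come from binary_funcs via the
# star-import at the top of this file. The names `example_conv` / `example_fc` are
# placeholders for illustration only.
if __name__ == '__main__':
    with Config(actQ=DQuantize, weightQ=XQuantize, bits=2, use_bn=True):
        example_conv = BinaryConv2D(filters=64, kernel_size=3, padding='same', use_bias=False)
        example_fc = BinaryDense(units=10, use_bias=False)
    # In the repo's graph-mode training setup these behave like ordinary Keras layers,
    # e.g. y = example_conv(x), with activations quantized to `bits` levels and
    # weights binarized by XQuantize.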
31,802
39.461832
106
py
Riptide
Riptide-master/riptide/binary/binary_funcs.py
import numpy as np import tensorflow as tf import tensorflow.keras as keras from .bit_approximations import load_clusters, load_bits def log2(x): return tf.math.log(x) / tf.math.log(2.0) @tf.custom_gradient def AP2(x): #x = tf.clip_by_value(x, 1e-7, 1.0) # Positive ap2 might be fine y = 2**(tf.round(log2(tf.abs(x)))) def grad_fn(dy): return [dy] return y, grad_fn def get_numpy(sess, x): if not isinstance(x, list): x = [x] with sess.as_default(): output = sess.run(x) if len(output) == 1: output = output[0] return output def compute_quantized_shiftnorm(variance, mean, epsilon, previous_weights, extra_scale, bits, rescale=True): # Compute number of bits to shift. std_factor = (1.0 / (extra_scale * tf.sqrt(variance + epsilon))) with tf.name_scope('AP2'): approximate_std = AP2(std_factor) # Now determine number of bits needed, the sum of weight scale # bits and shift norm scale bits. weight_scale_ap2, _ = get_quantize_bits(previous_weights) weight_scale_bits = -log2(weight_scale_ap2) weight_scale_bits = tf.reshape(weight_scale_bits, [-1]) total_shift_bits = weight_scale_bits + bits # Quantizing the mean is a little tricky, start by determining # the quantization scale. mean_scale = 1.0 + ((1.0 / (2.0**bits - 1.0)) * (1.0 - (1.0 / 2.0**weight_scale_bits))) # Now quantize each channel of mean appropriately. with tf.name_scope('FPQ'): quantized_means = FixedPointQuantize(mean, mean_scale, total_shift_bits, rescale) return approximate_std, quantized_means def get_shiftnorm_ap2(layer, previous_weights, rescale=False): mean = layer.weights[0].value() extra_scale = layer.extra_scale epsilon = layer.epsilon variance = layer.weights[1].value() bits = layer.bits approximate_std, quantized_means = compute_quantized_shiftnorm( variance, mean, epsilon, previous_weights, extra_scale, bits, rescale) return approximate_std, quantized_means def get_quantize_bits(x): if len(x.shape) > 2: mean = tf.reduce_mean(tf.abs(tf.reshape(x, [-1, x.shape[-1]])), axis=0) else: mean = tf.reduce_mean(tf.abs(x)) # Fix dimensions of mean for i in range(len(x.shape) - 1): mean = tf.expand_dims(mean, axis=0) bits = tf.cast(x >= 0, tf.float32) bits = (2 * bits) - 1 with tf.name_scope("AP2"): approximate_mean = AP2(mean) return approximate_mean, bits # Fixed point quantize operator that supports per_channel scales and bitwidth. # Assumes min and max value are both scale. @tf.custom_gradient def FixedPointQuantize(inputs, scale, bits, rescale): # Start by clipping values between specified range. y = tf.clip_by_value(inputs, -scale, scale) # Determine floating point value of each bit. bit_value = scale / (2.0**bits - 1.0) # Quantize tensor. y = y / bit_value y = tf.round(y) # Readjust to floating point if specified. y = tf.cond(rescale, true_fn=lambda: y * bit_value, false_fn=lambda: y) def grad_fn(dy): grad_mask = tf.cast(tf.abs(inputs) <= scale, tf.float32) dx = grad_mask * dy return [dx, None, None, None] return y, grad_fn @tf.custom_gradient def XQuantize(x): mean, bits = get_quantize_bits(x) y = mean * bits def grad_fn(dy): # Use a larger gradient cutoff to allow weights to grow if needed. # This can effect the scales based on the means of kernels. # Likely has no significant effect though. gradient_cutoff = 10.0 grad_mask = tf.cast(tf.abs(x) <= gradient_cutoff, tf.float32) # Allow weights to move off away from 1 if needed. 
leaky_grad_mask = tf.cast( tf.logical_or( tf.logical_and(x > gradient_cutoff, dy > 0), tf.logical_and(x < -gradient_cutoff, dy < 0)), tf.float32) dx = grad_mask * dy + 0.1 * leaky_grad_mask * dy return [dx] return y, grad_fn @tf.custom_gradient def Quantize(x): bits = tf.cast(x >= 0, tf.float32) bits = (2 * bits) - 1 y = bits def grad_fn(dy): #grad_mask_greater = tf.cast(tf.abs(x) >= 1, tf.float32) #grad_mask_lesser = tf.cast(tf.abs(x) <= 1, tf.float32) # Let big values leak a little #grad_mask = 0.1 * grad_mask_greater + grad_mask_lesser grad_mask = tf.cast(tf.abs(x) <= 1, tf.float32) dx = grad_mask * dy return [dx] return y, grad_fn def get_HWGQ_bits(x, clusters): # Computes HWG quantization and returns the integer binary value. for i in range(len(x.shape)): # need to reshape clusters properly. clusters = tf.expand_dims(clusters, axis=0) # Add new data axis for proper subtraction. x = tf.expand_dims(x, axis=-1) # Compute best fitting cluster for each value in data. distance = tf.abs(x - clusters) indices = tf.argmin(distance, axis=-1) return indices @tf.custom_gradient def HWGQuantize(x, clusters): indices = get_HWGQ_bits(x, clusters) y = tf.gather(clusters, indices) def grad_fn(dy): max_cluster = tf.reduce_max(clusters) min_cluster = tf.reduce_min(clusters) grad_filter = tf.logical_and(min_cluster <= x, x <= max_cluster) dx = dy * tf.cast(grad_filter, tf.float32) return [dx, None] return y, grad_fn # Assumes input is clipped to [0, 1] @tf.custom_gradient def DQ(x, bits, bipolar): # Use small adjustment to avoid rounding inconsistency. # Adjust for bipolar if needed. x = tf.cond(bipolar, lambda: (x + 1.0) / 2.0, lambda: x) epsilon = 1e-5 # Round to nearest linear bin in [0, 1]. output = (1.0 / (2.0**bits - 1.0)) * tf.round((2.0**bits - 1.0) * x + epsilon) # Deconvert back to [-1, 1] if bipolar. output = tf.cond(bipolar, lambda: (output - 0.5) * 2.0, lambda: output) # Pass through gradient. def grad_fn(dy): return [dy, None, None] return output, grad_fn def DQuantize(x, bits, bipolar=False): # Apply clipping in [0, 1] with associated gradient. if bipolar: x = tf.clip_by_value(x, -1, 1) else: x = tf.clip_by_value(x, 0, 1) # Quantize linearly. return DQ(x, bits, bipolar) def DQuantizeW(x, bits): x = tf.tanh(x) / (2.0 * tf.reduce_max(tf.abs(tf.tanh(x)))) + 0.5 return (2. * DQuantize(x, bits)) - 1.0 def DQuantizeBits(x, bits, bipolar=False): if bipolar: x = tf.clip_by_value(x, -1, 1) x = (x + 1.0) / 2.0 else: x = tf.clip_by_value(x, 0, 1) epsilon = 1e-5 return tf.round(x * (2.0**bits - 1.0) + epsilon) def DQuantizeBitsW(x, bits): shifted_x = (tf.tanh(x) / (2.0 * tf.reduce_max(tf.abs(tf.tanh(x))))) + 0.5 return DQuantizeBits(shifted_x, bits) # Takes bit value x and converts it to floating point approximation. def DBits2Value(x, bits): return x / (2.0**bits - 1.0) def DBits2ValueW(x, bits): approx = DBits2Value(x, bits) return 2.0 * (approx - 0.5)
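# --- Illustrative numeric sketch (assumed values, for intuition; assumes eager execution) ---
# AP2 snaps a positive scale to the nearest power of two so the multiply can be
# implemented as a shift: AP2(0.30) ~= 0.25, AP2(0.40) ~= 0.5, AP2(0.75) ~= 1.0.
# FixedPointQuantize with scale=1.0 and bits=2.0 keeps only the levels
# {0, 1/3, 2/3, 1} (and their negatives) after rescaling.
if __name__ == '__main__':
    x = tf.constant([0.30, 0.40, 0.75])
    print(AP2(x).numpy())                                  # -> [0.25, 0.5, 1.0]
    print(FixedPointQuantize(x, 1.0, 2.0, True).numpy())   # -> [0.333..., 0.333..., 0.666...]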
7,331
29.297521
79
py
Riptide
Riptide-master/riptide/binary/models/q_resnetv1b.py
"""ResNetV1bs, implemented in tf Keras.""" # pylint: disable=arguments-differ,unused-argument,missing-docstring,dangerous-default-value import os import tensorflow as tf from .. import binary_layers as nn #from tensorflow.keras.models import Sequential from riptide.utils.sequential import forward class BasicBlockV1b(tf.keras.Model): """ResNetV1b BasicBlockV1b """ expansion = 1 def __init__(self, planes, strides=1, dilation=1, downsample=None, previous_dilation=1, norm_layer=None, norm_kwargs={}, data_format='channels_last', **kwargs): super(BasicBlockV1b, self).__init__() self.conv1 = nn.Conv2D( filters=planes, kernel_size=3, strides=strides, padding='same', dilation_rate=dilation, use_bias=False, data_format=data_format) self.conv2 = nn.Conv2D( filters=planes, kernel_size=3, strides=1, padding='same', dilation_rate=previous_dilation, use_bias=False, data_format=data_format) self.downsample = downsample self.strides = strides def call(self, x): residual = x out = forward(x, self.conv1) out = forward(out, self.conv2) if self.downsample is not None: residual = forward(x, self.downsample) out = out + residual return out class BottleneckV1b(tf.keras.Model): """ResNetV1b BottleneckV1b """ # pylint: disable=unused-argument expansion = 4 def __init__(self, planes, strides=1, dilation=1, downsample=None, previous_dilation=1, norm_layer=None, norm_kwargs={}, last_gamma=False, data_format='channels_last', **kwargs): super(BottleneckV1b, self).__init__() self.conv1 = nn.Conv2D( filters=planes, kernel_size=1, use_bias=False, data_format=data_format) self.conv2 = nn.Conv2D( filters=planes, kernel_size=3, strides=strides, padding='same', dilation_rate=dilation, use_bias=False, data_format=data_format) self.conv3 = nn.Conv2D( filters=planes * 4, kernel_size=1, use_bias=False, data_format=data_format) self.downsample = downsample self.dilation = dilation self.strides = strides def call(self, x): residual = x out = forward(x, self.conv1) out = forward(out, self.conv2) out = forward(out, self.conv3) if self.downsample is not None: residual = forward(x, self.downsample) out = out + residual return out class ResNetV1b(tf.keras.Model): """ Pre-trained ResNetV1b Model, which preduces the strides of 8 featuremaps at conv5. Parameters ---------- block : Block Class for the residual block. Options are BasicBlockV1, BottleneckV1. layers : list of int Numbers of layers in each block classes : int, default 1000 Number of classification classes. dilated : bool, default False Applying dilation strategy to pretrained ResNet yielding a stride-8 model, typically used in Semantic Segmentation. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.nn.BatchNorm`; for Synchronized Cross-GPU BachNormalization). last_gamma : bool, default False Whether to initialize the gamma of the last BatchNorm layer in each bottleneck to zero. deep_stem : bool, default False Whether to replace the 7x7 conv1 with 3 3x3 convolution layers. avg_down : bool, default False Whether to use average pooling for projection skip connection between stages/downsample. final_drop : float, default 0.0 Dropout ratio before the final classification layer. Reference: - He, Kaiming, et al. "Deep residual learning for image recognition." Proceedings of the IEEE conference on computer vision and pattern recognition. 2016. - Yu, Fisher, and Vladlen Koltun. "Multi-scale context aggregation by dilated convolutions." 
""" # pylint: disable=unused-variable def __init__(self, block, layers, classes=1000, data_format='channels_last', dilated=False, norm_layer=nn.BatchNormalization, norm_kwargs={}, last_gamma=False, deep_stem=False, stem_width=32, avg_down=False, final_drop=0.0, name_prefix='', **kwargs): self.inplanes = stem_width * 2 if deep_stem else 64 self.data_format = data_format super(ResNetV1b, self).__init__(name=name_prefix) self.norm_kwargs = norm_kwargs with tf.name_scope(self.name): if not deep_stem: self.conv1 = nn.Conv2DBatchNorm( filters=64, kernel_size=7, strides=2, padding='same', use_bias=False, data_format=data_format) else: self.conv1 = ['conv1'] self.conv1.append( nn.Conv2DBatchNorm( filters=stem_width, kernel_size=3, strides=2, padding='same', use_bias=False, data_format=data_format)) self.conv1.append( nn.Conv2D( filters=stem_width, kernel_size=3, strides=1, padding='same', use_bias=False, data_format=data_format)) self.conv1.append( nn.Conv2D( filters=stem_width * 2, kernel_size=3, strides=1, padding='same', use_bias=False, data_format=data_format)) self.maxpool = nn.MaxPool2D( pool_size=3, strides=2, padding='same', data_format=data_format) self.layer1 = self._make_layer( 1, block, 64, layers[0], avg_down=avg_down, norm_layer=norm_layer, last_gamma=last_gamma, data_format=data_format) self.layer2 = self._make_layer( 2, block, 128, layers[1], strides=2, avg_down=avg_down, norm_layer=norm_layer, last_gamma=last_gamma, data_format=data_format) if dilated: self.layer3 = self._make_layer( 3, block, 256, layers[2], strides=1, dilation=2, avg_down=avg_down, norm_layer=norm_layer, last_gamma=last_gamma, data_format=data_format) self.layer4 = self._make_layer( 4, block, 512, layers[3], strides=1, dilation=4, avg_down=avg_down, norm_layer=norm_layer, last_gamma=last_gamma, data_format=data_format) else: self.layer3 = self._make_layer( 3, block, 256, layers[2], strides=2, avg_down=avg_down, norm_layer=norm_layer, last_gamma=last_gamma, data_format=data_format) self.layer4 = self._make_layer( 4, block, 512, layers[3], strides=2, avg_down=avg_down, norm_layer=norm_layer, last_gamma=last_gamma, data_format=data_format) self.avgpool = nn.GlobalAveragePooling2D(data_format=data_format) self.flat = nn.Flatten() self.drop = None if final_drop > 0.0: self.drop = nn.Dropout(final_drop) self.fc = [] self.fc.append(nn.Dense(units=classes, use_bias=False)) self.fc.append(nn.Scalu()) def _make_layer(self, stage_index, block, planes, blocks, strides=1, dilation=1, avg_down=False, norm_layer=None, last_gamma=False, data_format='channels_last'): downsample = None if strides != 1 or self.inplanes != planes * block.expansion: downsample = ['down%d' % stage_index] if avg_down: if dilation == 1: downsample.append( nn.AveragePooling2D( pool_size=strides, strides=strides, padding='same', data_format=data_format)) else: downsample.append( nn.AveragePooling2D( pool_size=1, strides=1, padding='same', data_format=data_format)) downsample.append( nn.Conv2D( filters=planes * block.expansion, kernel_size=1, strides=1, use_bias=False, data_format=data_format)) else: downsample.append( nn.Conv2D( filters=planes * block.expansion, kernel_size=1, strides=strides, use_bias=False, data_format=data_format)) layers = ['layers%d' % stage_index] if dilation in (1, 2): layers.append( block( planes, strides, dilation=1, downsample=downsample, previous_dilation=dilation, norm_layer=norm_layer, norm_kwargs=self.norm_kwargs, last_gamma=last_gamma, data_format=data_format)) elif dilation == 4: layers.append( block( planes, strides, 
dilation=2, downsample=downsample, previous_dilation=dilation, norm_layer=norm_layer, norm_kwargs=self.norm_kwargs, last_gamma=last_gamma, data_format=data_format)) else: raise RuntimeError("=> unknown dilation size: {}".format(dilation)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append( block( planes, dilation=dilation, previous_dilation=dilation, norm_layer=norm_layer, norm_kwargs=self.norm_kwargs, last_gamma=last_gamma, data_format=data_format)) return layers def call(self, x): if self.data_format == 'channels_first': x = tf.transpose(x, [0, 3, 1, 2]) x = forward(x, self.conv1) x = forward(x, self.maxpool) x = forward(x, self.layer1) x = forward(x, self.layer2) x = forward(x, self.layer3) x = forward(x, self.layer4) x = forward(x, self.avgpool) x = forward(x, self.flat) if self.drop is not None: x = forward(x, self.drop) x = forward(x, self.fc) return x def resnet18_v1b(**kwargs): """Constructs a ResNetV1b-18 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.BatchNorm`; for Synchronized Cross-GPU BachNormalization). last_gamma : bool, default False Whether to initialize the gamma of the last BatchNorm layer in each bottleneck to zero. """ model = ResNetV1b( BasicBlockV1b, [2, 2, 2, 2], name_prefix='resnetv1b', **kwargs) return model def resnet34_v1b(**kwargs): """Constructs a ResNetV1b-34 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.BatchNorm`; last_gamma : bool, default False Whether to initialize the gamma of the last BatchNorm layer in each bottleneck to zero. """ model = ResNetV1b( BasicBlockV1b, [3, 4, 6, 3], name_prefix='resnetv1b', **kwargs) return model def resnet50_v1b(**kwargs): """Constructs a ResNetV1b-50 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.BatchNorm`; last_gamma : bool, default False Whether to initialize the gamma of the last BatchNorm layer in each bottleneck to zero. """ model = ResNetV1b( BottleneckV1b, [3, 4, 6, 3], name_prefix='resnetv1b', **kwargs) return model def resnet101_v1b(**kwargs): """Constructs a ResNetV1b-101 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.BatchNorm`; last_gamma : bool, default False Whether to initialize the gamma of the last BatchNorm layer in each bottleneck to zero. """ model = ResNetV1b( BottleneckV1b, [3, 4, 23, 3], name_prefix='resnetv1b', **kwargs) return model def resnet152_v1b(**kwargs): """Constructs a ResNetV1b-152 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.BatchNorm`; last_gamma : bool, default False Whether to initialize the gamma of the last BatchNorm layer in each bottleneck to zero. 
""" model = ResNetV1b( BottleneckV1b, [3, 8, 36, 3], name_prefix='resnetv1b', **kwargs) return model def resnet50_v1c(**kwargs): """Constructs a ResNetV1c-50 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`; """ model = ResNetV1b( BottleneckV1b, [3, 4, 6, 3], deep_stem=True, name_prefix='resnetv1c_', **kwargs) return model def resnet101_v1c(**kwargs): """Constructs a ResNetV1c-101 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`; """ model = ResNetV1b( BottleneckV1b, [3, 4, 23, 3], deep_stem=True, name_prefix='resnetv1c_', **kwargs) return model def resnet152_v1c(**kwargs): """Constructs a ResNetV1b-152 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`; """ model = ResNetV1b( BottleneckV1b, [3, 8, 36, 3], deep_stem=True, name_prefix='resnetv1c_', **kwargs) return model def resnet50_v1d(**kwargs): """Constructs a ResNetV1d-50 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`; """ model = ResNetV1b( BottleneckV1b, [3, 4, 6, 3], deep_stem=True, avg_down=True, name_prefix='resnetv1d_', **kwargs) return model def resnet101_v1d(**kwargs): """Constructs a ResNetV1d-50 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`; """ model = ResNetV1b( BottleneckV1b, [3, 4, 23, 3], deep_stem=True, avg_down=True, name_prefix='resnetv1d_', **kwargs) return model def resnet152_v1d(**kwargs): """Constructs a ResNetV1d-50 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`; """ model = ResNetV1b( BottleneckV1b, [3, 8, 36, 3], deep_stem=True, avg_down=True, name_prefix='resnetv1d_', **kwargs) return model def resnet50_v1e(**kwargs): """Constructs a ResNetV1e-50 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`; """ model = ResNetV1b( BottleneckV1b, [3, 4, 6, 3], deep_stem=True, avg_down=True, stem_width=64, name_prefix='resnetv1e_', **kwargs) return model def resnet101_v1e(**kwargs): """Constructs a ResNetV1e-50 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. 
norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`; """ model = ResNetV1b( BottleneckV1b, [3, 4, 23, 3], deep_stem=True, avg_down=True, stem_width=64, name_prefix='resnetv1e_', **kwargs) return model def resnet152_v1e(**kwargs): """Constructs a ResNetV1e-50 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`; """ model = ResNetV1b( BottleneckV1b, [3, 8, 36, 3], deep_stem=True, avg_down=True, stem_width=64, name_prefix='resnetv1e_', **kwargs) return model def resnet50_v1s(**kwargs): """Constructs a ResNetV1s-50 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`; """ model = ResNetV1b( BottleneckV1b, [3, 4, 6, 3], deep_stem=True, stem_width=64, name_prefix='resnetv1s_', **kwargs) return model def resnet101_v1s(**kwargs): """Constructs a ResNetV1s-101 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`; """ model = ResNetV1b( BottleneckV1b, [3, 4, 23, 3], deep_stem=True, stem_width=64, name_prefix='resnetv1s_', **kwargs) return model def resnet152_v1s(**kwargs): """Constructs a ResNetV1s-152 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`; """ model = ResNetV1b( BottleneckV1b, [3, 8, 36, 3], deep_stem=True, stem_width=64, name_prefix='resnetv1s_', **kwargs) return model
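# --- Illustrative construction sketch (assumed quantization settings) ---
# The blocks above only wire up the (binary) convolutions; the actual quantizers come
# from the binary_layers Config scope that is active while the model is instantiated.
if __name__ == '__main__':
    from riptide.binary.binary_funcs import DQuantize, XQuantize
    with nn.Config(actQ=DQuantize, weightQ=XQuantize, bits=2, use_bn=True):
        model = resnet18_v1b(classes=1000)
    # A forward pass in the repo's graph-mode setup would then look like:
    #   logits = model(tf.zeros([1, 224, 224, 3]))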
22,333
30.679433
100
py
Riptide
Riptide-master/riptide/binary/models/q_cifar_resnet.py
import os import tensorflow as tf from .. import binary_layers as nn #from tensorflow.keras.models import Sequential from riptide.utils.sequential import forward_layer_list def _conv3x3(channels, stride, normal=False): if normal: return nn.Conv2DBatchNorm( channels, kernel_size=3, strides=stride, padding="same", use_bias=False) else: return nn.Conv2D( channels, kernel_size=3, strides=stride, padding="same", use_bias=False) class CIFARBasicBlockV1(tf.keras.Model): """BasicBlock V1 from `"Deep Residual Learning for Image Recognition" <http://arxiv.org/abs/1512.03385>`_ paper. This is used for ResNet V1 for 18, 34 layers. Parameters ---------- channels : int Number of output channels. stride : int Stride size. downsample : bool, default False Whether to downsample the input. """ def __init__(self, channels, stride, downsample=False, first=False, **kwargs): super(CIFARBasicBlockV1, self).__init__(**kwargs) self.body = [] if downsample: self.downsample = [] else: self.downsample = None self.body.append(_conv3x3(channels, stride, normal=first)) self.body.append(_conv3x3(channels, 1)) if self.downsample is not None: self.downsample.append( nn.Conv2D( channels, kernel_size=1, strides=stride, use_bias=False)) self.add = nn.QAdd() def call(self, x): residual = x x = forward_layer_list(x, self.body) if self.downsample is not None: residual = forward_layer_list(residual, self.downsample) x = self.add([residual, x]) return x class CIFARBasicBlockV2(tf.keras.Model): """BasicBlock V2 from `"Identity Mappings in Deep Residual Networks" <https://arxiv.org/abs/1603.05027>`_ paper. This is used for ResNet V2 for 18, 34 layers. Parameters ---------- channels : int Number of output channels. stride : int Stride size. downsample : bool, default False Whether to downsample the input. in_channels : int, default 0 Number of input channels. Default is 0, to infer from the graph. """ def __init__(self, channels, stride, downsample=False, first=False, **kwargs): super(CIFARBasicBlockV2, self).__init__(**kwargs) self.body = [] if downsample: self.downsample = [] else: self.downsample = None self.body.append(_conv3x3(channels, stride, normal=first)) self.body.append(_conv3x3(channels, 1)) if self.downsample is not None: self.downsample.append( nn.Conv2D( channels, kernel_size=1, strides=stride, use_bias=False)) self.add = nn.QAdd() def call(self, x): residual = x x = forward_layer_list(x, self.body) if self.downsample is not None: residual = forward_layer_list(residual, self.downsample) return self.add([x, residual]) class CIFARResNetV1(tf.keras.Model): """ResNet V1 model from `"Deep Residual Learning for Image Recognition" <http://arxiv.org/abs/1512.03385>`_ paper. Parameters ---------- block : HybridBlock Class for the residual block. Options are CIFARBasicBlockV1, BottleneckV1. layers : list of int Numbers of layers in each block channels : list of int Numbers of channels in each block. Length should be one larger than layers list. classes : int, default 10 Number of classification classes. 
""" def __init__(self, block, layers, channels, classes=10, **kwargs): super(CIFARResNetV1, self).__init__(**kwargs) with tf.name_scope("CIFARResNetV1"): self.features = [] self.features.append( nn.Conv2DBatchNorm( channels[0], kernel_size=3, strides=1, padding="same", use_bias=False)) for i, num_layer in enumerate(layers): stride = 1 if i == 0 else 2 self.features.append( self._make_layer( block, num_layer, channels[i + 1], stride, i + 1, in_channels=channels[i])) self.features.append(nn.GlobalAveragePooling2D()) self.output_layer = [] self.output_layer.append(nn.Dense(classes, use_bias=False)) self.output_layer.append(nn.Scalu()) def _make_layer(self, block, layers, channels, stride, stage_index, in_channels=0): layer = [] layer.append(block(channels, stride, channels != in_channels)) for _ in range(layers - 1): layer.append(block(channels, 1, False)) return layer def call(self, x): x = forward_layer_list(x, self.features) x = forward_layer_list(x, self.output_layer) return x class CIFARResNetV2(tf.keras.Model): """ResNet V2 model from `"Identity Mappings in Deep Residual Networks" <https://arxiv.org/abs/1603.05027>`_ paper. Parameters ---------- block : HybridBlock Class for the residual block. Options are CIFARBasicBlockV1, BottleneckV1. layers : list of int Numbers of layers in each block channels : list of int Numbers of channels in each block. Length should be one larger than layers list. classes : int, default 10 Number of classification classes. """ def __init__(self, block, layers, channels, classes=10, **kwargs): super(CIFARResNetV2, self).__init__(**kwargs) assert len(layers) == len(channels) - 1 self.features = [] in_channels = channels[0] first = True for i, num_layer in enumerate(layers): stride = 1 if i == 0 else 2 self.features.append( self._make_layer( block, num_layer, channels[i + 1], stride, i + 1, in_channels=in_channels, first=first)) first = False in_channels = channels[i + 1] self.features.append(nn.GlobalAveragePooling2D()) self.features.append(nn.Flatten()) self.output_layer = [] self.output_layer.append(nn.Dense(classes, use_bias=False)) self.output_layer.append(nn.Scalu()) def _make_layer(self, block, layers, channels, stride, stage_index, in_channels=0, first=False): layer = [] layer.append( block(channels, stride, channels != in_channels, first=first)) for _ in range(layers - 1): layer.append(block(channels, 1, False)) return layer def call(self, x): x = forward_layer_list(x, self.features) x = forward_layer_list(x, self.output_layer) return x # Specification resnet_net_versions = [CIFARResNetV1, CIFARResNetV2] resnet_block_versions = [CIFARBasicBlockV1, CIFARBasicBlockV2] def _get_resnet_spec(num_layers): assert (num_layers - 2) % 6 == 0 n = (num_layers - 2) // 6 channels = [16, 16, 32, 64] layers = [n] * (len(channels) - 1) return layers, channels # Constructor def get_cifar_resnet(version, num_layers, **kwargs): r"""ResNet V1 model from `"Deep Residual Learning for Image Recognition" <http://arxiv.org/abs/1512.03385>`_ paper. ResNet V2 model from `"Identity Mappings in Deep Residual Networks" <https://arxiv.org/abs/1603.05027>`_ paper. Parameters ---------- version : int Version of ResNet. Options are 1, 2. num_layers : int Numbers of layers. Needs to be an integer in the form of 6*n+2, e.g. 20, 56, 110, 164. 
""" layers, channels = _get_resnet_spec(num_layers) resnet_class = resnet_net_versions[version - 1] block_class = resnet_block_versions[version - 1] net = resnet_class(block_class, layers, channels, **kwargs) return net def cifar_resnet20_v1(**kwargs): r"""ResNet-20 V1 model for CIFAR10 from `"Deep Residual Learning for Image Recognition" <http://arxiv.org/abs/1512.03385>`_ paper. Parameters ---------- pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters. """ return get_cifar_resnet(1, 20, **kwargs) def cifar_resnet56_v1(**kwargs): r"""ResNet-56 V1 model for CIFAR10 from `"Deep Residual Learning for Image Recognition" <http://arxiv.org/abs/1512.03385>`_ paper. Parameters ---------- pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters. """ return get_cifar_resnet(1, 56, **kwargs) def cifar_resnet110_v1(**kwargs): r"""ResNet-110 V1 model for CIFAR10 from `"Deep Residual Learning for Image Recognition" <http://arxiv.org/abs/1512.03385>`_ paper. Parameters ---------- pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters. """ return get_cifar_resnet(1, 110, **kwargs) def cifar_resnet20_v2(**kwargs): r"""ResNet-20 V2 model for CIFAR10 from `"Identity Mappings in Deep Residual Networks" <https://arxiv.org/abs/1603.05027>`_ paper. Parameters ---------- pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters. """ return get_cifar_resnet(2, 20, **kwargs) def cifar_resnet56_v2(**kwargs): r"""ResNet-56 V2 model for CIFAR10 from `"Identity Mappings in Deep Residual Networks" <https://arxiv.org/abs/1603.05027>`_ paper. Parameters ---------- pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters. """ return get_cifar_resnet(2, 56, **kwargs) def cifar_resnet110_v2(**kwargs): r"""ResNet-110 V2 model for CIFAR10 from `"Identity Mappings in Deep Residual Networks" <https://arxiv.org/abs/1603.05027>`_ paper. Parameters ---------- pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters. """ return get_cifar_resnet(2, 110, **kwargs)
12,031
30.170984
94
py
Riptide
Riptide-master/riptide/models/squeezenet_batchnorm.py
import tensorflow as tf from riptide.binary import binary_layers as nn bnmomemtum=0.9 class SqueezeNet(tf.keras.Model): def __init__(self, classes=1000): super(SqueezeNet, self).__init__() self.classes = classes self.c0 = nn.NormalConv2D(kernel_size=7, strides=4, filters=96, padding='same', activation='relu') self.b0 = nn.NormalBatchNormalization(momentum=bnmomemtum) #self.mp0 = nn.MaxPool2D(pool_size=2) self.enter_int = nn.EnterInteger(scale=1.0) # Fire 1 self.f1c1 = nn.BinaryConv2D(filters=32, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f1b1 = nn.UnfusedBatchNorm(momentum=bnmomemtum) self.f1c2 = nn.BinaryConv2D(filters=64, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f1b2 = nn.UnfusedBatchNorm(momentum=bnmomemtum) self.f1c3 = nn.BinaryConv2D(filters=64, kernel_size=3, activation='relu', padding='same', use_bias=False) self.f1b3 = nn.UnfusedBatchNorm(momentum=bnmomemtum) self.f1concat = tf.keras.layers.Concatenate(axis=-1) # Fire 2 self.f2c1 = nn.BinaryConv2D(filters=32, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f2b1 = nn.UnfusedBatchNorm(momentum=bnmomemtum) self.f2c2 = nn.BinaryConv2D(filters=64, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f2b2 = nn.UnfusedBatchNorm(momentum=bnmomemtum) self.f2c3 = nn.BinaryConv2D(filters=64, kernel_size=3, activation='relu', padding='same', use_bias=False) self.f2b3 = nn.UnfusedBatchNorm(momentum=bnmomemtum) self.f2concat = tf.keras.layers.Concatenate(axis=-1) # Fire 3 self.f3c1 = nn.BinaryConv2D(filters=32, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f3b1 = nn.UnfusedBatchNorm(momentum=bnmomemtum) self.f3c2 = nn.BinaryConv2D(filters=128, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f3b2 = nn.UnfusedBatchNorm(momentum=bnmomemtum) self.f3c3 = nn.BinaryConv2D(filters=128, kernel_size=3, activation='relu', padding='same', use_bias=False) self.f3b3 = nn.UnfusedBatchNorm(momentum=bnmomemtum) self.f3concat = tf.keras.layers.Concatenate(axis=-1) self.mp3 = tf.keras.layers.MaxPooling2D(pool_size=2) # Fire 4 self.f4c1 = nn.BinaryConv2D(filters=32, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f4b1 = nn.UnfusedBatchNorm(momentum=bnmomemtum) self.f4c2 = nn.BinaryConv2D(filters=128, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f4b2 = nn.UnfusedBatchNorm(momentum=bnmomemtum) self.f4c3 = nn.BinaryConv2D(filters=128, kernel_size=3, activation='relu', padding='same', use_bias=False) self.f4b3 = nn.UnfusedBatchNorm(momentum=bnmomemtum) self.f4concat = tf.keras.layers.Concatenate(axis=-1) # Fire 5 self.f5c1 = nn.BinaryConv2D(filters=64, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f5b1 = nn.UnfusedBatchNorm(momentum=bnmomemtum) self.f5c2 = nn.BinaryConv2D(filters=192, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f5b2 = nn.UnfusedBatchNorm(momentum=bnmomemtum) self.f5c3 = nn.BinaryConv2D(filters=192, kernel_size=3, activation='relu', padding='same', use_bias=False) self.f5b3 = nn.UnfusedBatchNorm(momentum=bnmomemtum) self.f5concat = tf.keras.layers.Concatenate(axis=-1) # Fire 6 self.f6c1 = nn.BinaryConv2D(filters=64, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f6b1 = nn.UnfusedBatchNorm(momentum=bnmomemtum) self.f6c2 = nn.BinaryConv2D(filters=192, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f6b2 = nn.UnfusedBatchNorm(momentum=bnmomemtum) self.f6c3 = 
nn.BinaryConv2D(filters=192, kernel_size=3, activation='relu', padding='same', use_bias=False) self.f6b3 = nn.UnfusedBatchNorm(momentum=bnmomemtum) self.f6concat = tf.keras.layers.Concatenate(axis=-1) # Fire 7 self.f7c1 = nn.BinaryConv2D(filters=64, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f7b1 = nn.UnfusedBatchNorm(momentum=bnmomemtum) self.f7c2 = nn.BinaryConv2D(filters=256, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f7b2 = nn.UnfusedBatchNorm(momentum=bnmomemtum) self.f7c3 = nn.BinaryConv2D(filters=256, kernel_size=3, activation='relu', padding='same', use_bias=False) self.f7b3 = nn.UnfusedBatchNorm(momentum=bnmomemtum) self.f7concat = tf.keras.layers.Concatenate(axis=-1) self.mp7 = tf.keras.layers.MaxPooling2D(pool_size=2) # Fire 8 self.f8c1 = nn.BinaryConv2D(filters=64, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f8b1 = nn.UnfusedBatchNorm(momentum=bnmomemtum) self.f8c2 = nn.BinaryConv2D(filters=256, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f8b2 = nn.UnfusedBatchNorm(momentum=bnmomemtum) self.f8c3 = nn.BinaryConv2D(filters=256, kernel_size=3, activation='relu', padding='same', use_bias=False) self.f8b3 = nn.UnfusedBatchNorm(momentum=bnmomemtum) self.f8concat = tf.keras.layers.Concatenate(axis=-1) self.exit_int = nn.ExitInteger() # Output self.avgpool = tf.keras.layers.GlobalAveragePooling2D() self.classifier = tf.keras.layers.Dense(1000, activation='softmax') def call(self, x, training=None): y = self.c0(x) y = self.b0(y, training=training) #y = self.mp0(y) y = self.enter_int(y) # Fire 1 y = self.f1c1(y) y = self.f1b1(y, training=training) y1x = self.f1c2(y) y1x = self.f1b2(y1x, training=training) y3x = self.f1c3(y) y3x = self.f1b3(y3x, training=training) y = self.f1concat([y1x, y3x]) # Fire 2 y = self.f2c1(y) y = self.f2b1(y, training=training) y1x = self.f2c2(y) y1x = self.f2b2(y1x, training=training) y3x = self.f2c3(y) y3x = self.f2b3(y3x, training=training) y = self.f2concat([y1x, y3x]) # Fire 3 y = self.f3c1(y) y = self.f3b1(y, training=training) y1x = self.f3c2(y) y1x = self.f3b2(y1x, training=training) y3x = self.f3c3(y) y3x = self.f3b3(y3x, training=training) y = self.f3concat([y1x, y3x]) y = self.mp3(y) # Fire 4 y = self.f4c1(y) y = self.f4b1(y, training=training) y1x = self.f4c2(y) y1x = self.f4b2(y1x, training=training) y3x = self.f4c3(y) y3x = self.f4b3(y3x, training=training) y = self.f4concat([y1x, y3x]) # Fire 5 y = self.f5c1(y) y = self.f5b1(y, training=training) y1x = self.f5c2(y) y1x = self.f5b2(y1x, training=training) y3x = self.f5c3(y) y3x = self.f5b3(y3x, training=training) y = self.f5concat([y1x, y3x]) # Fire 6 y = self.f6c1(y) y = self.f6b1(y, training=training) y1x = self.f6c2(y) y1x = self.f6b2(y1x, training=training) y3x = self.f6c3(y) y3x = self.f6b3(y3x, training=training) y = self.f6concat([y1x, y3x]) # Fire 7 y = self.f7c1(y) y = self.f7b1(y, training=training) y1x = self.f7c2(y) y1x = self.f7b2(y1x, training=training) y3x = self.f7c3(y) y3x = self.f7b3(y3x, training=training) y = self.f7concat([y1x, y3x]) y = self.mp7(y) # Fire 8 y = self.f8c1(y) y = self.f8b1(y, training=training) y1x = self.f8c2(y) y1x = self.f8b2(y1x, training=training) y3x = self.f8c3(y) y3x = self.f8b3(y3x, training=training) y = self.f8concat([y1x, y3x]) y = self.exit_int(y) y = self.avgpool(y) y = self.classifier(y) return y
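# --- Illustrative sketch (assumed quantization settings) ---
# Only the stem conv (c0) stays full precision; EnterInteger marks the switch into the
# binarized fire modules, whose quantizers come from the Config scope active at
# construction time.
if __name__ == '__main__':
    from riptide.binary.binary_funcs import DQuantize, XQuantize
    with nn.Config(actQ=DQuantize, weightQ=XQuantize, bits=2, use_bn=True):
        net = SqueezeNet(classes=1000)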
8,191
43.521739
114
py
Riptide
Riptide-master/riptide/models/vggnet_normal.py
import os import tensorflow as tf import tensorflow.keras.layers as nn class vggnet(tf.keras.Model): def __init__(self, classes=1000): super(vggnet, self).__init__() self.conv1 = nn.Conv2D( filters=96, kernel_size=7, strides=2, padding='same', activation=None, use_bias=False) self.pool1 = nn.MaxPool2D(pool_size=2, strides=2) self.bn1 = nn.BatchNormalization(center=False, scale=False) self.conv2 = nn.Conv2D( filters=256, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False) self.bn2 = nn.BatchNormalization() self.conv3 = nn.Conv2D( filters=256, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False) self.bn3 = nn.BatchNormalization() self.conv4 = nn.Conv2D( filters=256, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False) self.pool4 = nn.MaxPool2D(pool_size=2, strides=2) self.bn4 = nn.BatchNormalization() self.conv5 = nn.Conv2D( filters=512, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False) self.bn5 = nn.BatchNormalization() self.conv6 = nn.Conv2D( filters=512, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False) self.bn6 = nn.BatchNormalization() self.conv7 = nn.Conv2D( filters=512, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False) self.pool7 = nn.MaxPool2D(pool_size=2, strides=2) self.bn7 = nn.BatchNormalization() self.conv8 = nn.Conv2D( filters=512, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False) self.bn8 = nn.BatchNormalization() self.conv9 = nn.Conv2D( filters=512, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False) self.bn9 = nn.BatchNormalization() self.conv10 = nn.Conv2D( filters=512, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False) self.pool10 = nn.MaxPool2D(pool_size=2, strides=2) self.bn10 = nn.BatchNormalization() self.flatten = nn.Flatten() self.dense1 = nn.Dense(4096, use_bias=False, activation='relu') self.bn11 = nn.BatchNormalization() self.dense2 = nn.Dense(4096, use_bias=False, activation='relu') self.bn12 = nn.BatchNormalization() self.dense3 = nn.Dense(classes, use_bias=False) self.softmax = nn.Activation('softmax') def call(self, inputs, training=None, debug=False): layers = [] with tf.name_scope('unbinarized'): x = self.conv1(inputs) layers.append(x) x = self.pool1(x) layers.append(x) x = self.bn1(x, training=training) layers.append(x) x = self.conv2(x) layers.append(x) x = self.bn2( x, training=training) layers.append(x) x = self.conv3(x) layers.append(x) x = self.bn3( x, training=training) layers.append(x) x = self.conv4(x) layers.append(x) x = self.pool4(x) layers.append(x) x = self.bn4( x, training=training) layers.append(x) x = self.conv5(x) layers.append(x) x = self.bn5( x, training=training) layers.append(x) x = self.conv6(x) layers.append(x) x = self.bn6( x, training=training) layers.append(x) x = self.conv7(x) layers.append(x) x = self.pool7(x) layers.append(x) x = self.bn7( x, training=training) layers.append(x) x = self.conv8(x) layers.append(x) x = self.bn8( x, training=training) layers.append(x) x = self.conv9(x) layers.append(x) x = self.bn9( x, training=training) layers.append(x) x = self.conv10(x) layers.append(x) x = self.pool10(x) layers.append(x) x = self.bn10( x, training=training) layers.append(x) x = self.flatten(x) layers.append(x) x = self.dense1(x) layers.append(x) x = self.bn11( x, training=training) layers.append(x) x = self.dense2(x) layers.append(x) x = self.bn12( x, training=training) layers.append(x) x = 
self.dense3(x) layers.append(x) x = self.softmax(x) tf.compat.v1.summary.histogram('output', x) if debug: return layers else: return x
5,389
26.5
71
py
Riptide
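A note on the model above: call() invokes tf.compat.v1.summary.histogram unconditionally, so the forward pass is happiest inside a graph context. A minimal usage sketch, assuming the class is importable from this module path under TensorFlow 2.x and wrapping the call in tf.function for that reason; the debug=True branch would instead return the list of intermediate activations that call() accumulates.

import tensorflow as tf
from riptide.models.vggnet_normal import vggnet   # assumed import path

model = vggnet(classes=1000)

@tf.function
def forward(images):
    # Graph-mode forward pass; keeps the v1 summary op inside a graph.
    return model(images, training=False)

probs = forward(tf.zeros([2, 224, 224, 3]))       # dummy ImageNet-sized batch
print(probs.shape)                                # expected: (2, 1000)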
Riptide-master/riptide/models/cifarnet.py
import os import tensorflow as tf from riptide.binary import binary_layers as nn #from tensorflow.keras.models import Sequential from riptide.utils.sequential import forward_layer_list class CifarNet(tf.keras.Model): def __init__(self): super(CifarNet, self).__init__() self.conv1 = nn.NormalConv2D( filters=32, kernel_size=3, strides=2, padding='same', activation='relu', use_bias=False) self.bn1 = nn.BatchNormalization() self.conv2 = nn.Conv2D( filters=16, kernel_size=1, strides=1, padding='same', activation='relu', use_bias=False) self.bn2 = nn.BatchNormalization() self.conv3 = nn.Conv2D( filters=64, kernel_size=3, strides=2, padding='same', activation='relu', use_bias=False) self.bn3 = nn.BatchNormalization() self.conv4 = nn.Conv2D( filters=32, kernel_size=1, strides=1, padding='same', activation='relu', use_bias=False) self.bn4 = nn.BatchNormalization() self.conv5 = nn.Conv2D( filters=128, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False) self.bn5 = nn.BatchNormalization() self.conv6 = nn.Conv2D( filters=64, kernel_size=1, strides=1, padding='same', activation='relu', use_bias=False) self.bn6 = nn.BatchNormalization() self.conv7 = nn.Conv2D( filters=32, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False) self.bn7 = nn.BatchNormalization() self.global_pooling = nn.GlobalAveragePooling2D() self.dense = nn.NormalDense(10, use_bias=False) def call(self, inputs, training=None): with tf.name_scope('normal'): x = self.conv1(inputs) x = self.bn1(x, training=training) x = self.conv2(x) x = self.bn2(x, training=training) x = self.conv3(x) x = self.bn3(x, training=training) x = self.conv4(x) x = self.bn4(x, training=training) x = self.conv5(x) x = self.bn5(x, training=training) x = self.conv6(x) x = self.bn6(x, training=training) x = self.conv7(x) x = self.bn7(x) x = self.global_pooling(x) with tf.name_scope('normal'): x = self.dense(x) return x
2,800
26.732673
57
py
Riptide
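A quick sanity-check sketch for CifarNet on CIFAR-10-shaped inputs, assuming the riptide package is importable and its NormalConv2D/Conv2D/NormalDense wrappers behave as drop-in Keras layers, as the code above implies.

import tensorflow as tf
from riptide.models.cifarnet import CifarNet      # assumed import path

model = CifarNet()
x = tf.random.uniform([8, 32, 32, 3])             # CIFAR-10 sized batch
logits = model(x, training=False)                 # two stride-2 convs -> 8x8 feature maps
print(logits.shape)                               # expected: (8, 10)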
Riptide-master/riptide/models/resnet18.py
import os import tensorflow as tf from riptide.binary import binary_layers as nn class resnet18(tf.keras.Model): def __init__(self, classes=1000): super(resnet18, self).__init__() # Input Layer self.conv1 = nn.NormalConv2D( filters=64, kernel_size=7, strides=2, padding='same', activation=None, use_bias=False) self.pool1 = nn.NormalMaxPool2D(pool_size=3, strides=2, padding='same') self.bn1 = nn.NormalBatchNormalization(center=False, scale=False) self.quantize = nn.EnterInteger(1.0) # BasicBlock 1 self.block1_conv1 = nn.BinaryConv2D( filters=64, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False) self.block1_bn1 = nn.NormalBatchNormalization() self.block1_conv2 = nn.BinaryConv2D( filters=64, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False) self.block1_bn2 = nn.NormalBatchNormalization() self.block1_res = nn.ResidualConnect() # BasicBlock 2 self.block2_conv1 = nn.BinaryConv2D( filters=64, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False) self.block2_bn1 = nn.NormalBatchNormalization() self.block2_conv2 = nn.BinaryConv2D( filters=64, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False) self.block2_bn2 = nn.NormalBatchNormalization() self.block2_res = nn.ResidualConnect() # BasicBlock 3 self.block3_conv1 = nn.BinaryConv2D( filters=128, kernel_size=3, strides=2, padding='same', activation=None, use_bias=False) self.block3_bn1 = nn.NormalBatchNormalization() self.block3_conv2 = nn.BinaryConv2D( filters=128, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False) self.block3_bn2 = nn.NormalBatchNormalization() self.block3_down_conv = nn.BinaryConv2D( filters=128, kernel_size=1, strides=2, padding='valid', activation=None, use_bias=False) self.block3_down_bn = nn.NormalBatchNormalization() self.block3_res = nn.ResidualConnect() # BasicBlock 4 self.block4_conv1 = nn.BinaryConv2D( filters=128, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False) self.block4_bn1 = nn.NormalBatchNormalization() self.block4_conv2 = nn.BinaryConv2D( filters=128, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False) self.block4_bn2 = nn.NormalBatchNormalization() self.block4_res = nn.ResidualConnect() # BasicBlock 5 self.block5_conv1 = nn.BinaryConv2D( filters=256, kernel_size=3, strides=2, padding='same', activation=None, use_bias=False) self.block5_bn1 = nn.NormalBatchNormalization() self.block5_conv2 = nn.BinaryConv2D( filters=256, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False) self.block5_bn2 = nn.NormalBatchNormalization() self.block5_down_conv = nn.BinaryConv2D( filters=256, kernel_size=1, strides=2, padding='valid', activation=None, use_bias=False) self.block5_down_bn = nn.NormalBatchNormalization() self.block5_res = nn.ResidualConnect() # BasicBlock 6 self.block6_conv1 = nn.BinaryConv2D( filters=256, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False) self.block6_bn1 = nn.NormalBatchNormalization() self.block6_conv2 = nn.BinaryConv2D( filters=256, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False) self.block6_bn2 = nn.NormalBatchNormalization() self.block6_res = nn.ResidualConnect() # BasicBlock 7 self.block7_conv1 = nn.BinaryConv2D( filters=512, kernel_size=3, strides=2, padding='same', activation=None, use_bias=False) self.block7_bn1 = nn.NormalBatchNormalization() self.block7_conv2 = nn.BinaryConv2D( filters=512, kernel_size=3, strides=1, padding='same', activation=None, 
use_bias=False) self.block7_bn2 = nn.NormalBatchNormalization() self.block7_down_conv = nn.BinaryConv2D( filters=512, kernel_size=1, strides=2, padding='valid', activation=None, use_bias=False) self.block7_down_bn = nn.NormalBatchNormalization() self.block7_res = nn.ResidualConnect() # BasicBlock 8 self.block8_conv1 = nn.BinaryConv2D( filters=512, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False) self.block8_bn1 = nn.NormalBatchNormalization() self.block8_conv2 = nn.BinaryConv2D( filters=512, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False) self.block8_bn2 = nn.NormalBatchNormalization() self.block8_res = nn.ResidualConnect() self.avg_pool = nn.GlobalAveragePooling2D() self.flatten = nn.Flatten() self.dense = nn.BinaryDense(classes, use_bias=False) self.scalu = nn.Scalu() def call(self, inputs, training=None): with tf.name_scope('unbinarized'): x = self.conv1(inputs) x = self.pool1(x) x = self.bn1(x, training=training) x = self.quantize(x) # Block 1 residual = x x = self.block1_conv1(x) x = self.block1_bn1( x, conv_weights=self.block1_conv1.weights[0].value(), training=training) x = self.block1_conv2(x) x = self.block1_bn2( x, conv_weights=self.block1_conv2.weights[0].value(), training=training) next_res = x x = self.block1_res([x, residual]) # Block 2 residual = next_res x = self.block2_conv1(x) x = self.block2_bn1( x, conv_weights=self.block2_conv1.weights[0].value(), training=training) x = self.block2_conv2(x) x = self.block2_bn2( x, conv_weights=self.block2_conv2.weights[0].value(), training=training) next_res = x x = self.block2_res([x, residual]) # Block 3 residual = next_res x = self.block3_conv1(x) x = self.block3_bn1( x, conv_weights=self.block3_conv1.weights[0].value(), training=training) x = self.block3_conv2(x) x = self.block3_bn2( x, conv_weights=self.block3_conv2.weights[0].value(), training=training) next_res = x residual = self.block3_down_conv(residual) residual = self.block3_down_bn( residual, conv_weights=self.block3_down_conv.weights[0].value(), training=training) x = self.block3_res([x, residual]) # Block 4 residual = next_res x = self.block4_conv1(x) x = self.block4_bn1( x, conv_weights=self.block4_conv1.weights[0].value(), training=training) x = self.block4_conv2(x) x = self.block4_bn2( x, conv_weights=self.block4_conv2.weights[0].value(), training=training) next_res = x x = self.block4_res([x, residual]) # Block 5 residual = next_res x = self.block5_conv1(x) x = self.block5_bn1( x, conv_weights=self.block5_conv1.weights[0].value(), training=training) x = self.block5_conv2(x) x = self.block5_bn2( x, conv_weights=self.block5_conv2.weights[0].value(), training=training) next_res = x residual = self.block5_down_conv(residual) residual = self.block5_down_bn( residual, conv_weights=self.block5_down_conv.weights[0].value(), training=training) x = self.block5_res([x, residual]) # Block 6 residual = next_res x = self.block6_conv1(x) x = self.block6_bn1( x, conv_weights=self.block6_conv1.weights[0].value(), training=training) x = self.block6_conv2(x) x = self.block6_bn2( x, conv_weights=self.block6_conv2.weights[0].value(), training=training) next_res = x x = self.block6_res([x, residual]) # Block 7 residual = next_res x = self.block7_conv1(x) x = self.block7_bn1( x, conv_weights=self.block7_conv1.weights[0].value(), training=training) x = self.block7_conv2(x) x = self.block7_bn2( x, conv_weights=self.block7_conv2.weights[0].value(), training=training) next_res = x residual = self.block7_down_conv(residual) residual = 
self.block7_down_bn( residual, conv_weights=self.block7_down_conv.weights[0].value(), training=training) x = self.block7_res([x, residual]) # Block 8 residual = next_res x = self.block8_conv1(x) x = self.block8_bn1( x, conv_weights=self.block8_conv1.weights[0].value(), training=training) x = self.block8_conv2(x) x = self.block8_bn2( x, conv_weights=self.block8_conv2.weights[0].value(), training=training) x = self.block8_res([x, residual]) # Output layers x = self.avg_pool(x) x = self.flatten(x) x = self.dense(x) x = self.scalu(x) return x
10,958
30.222222
79
py
Riptide
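The call() above hand-writes the same pairing for every block: a BinaryConv2D followed by its batch norm, with the convolution kernel forwarded as conv_weights. A hypothetical helper (illustrative only, not part of the library) shows the pattern once:

def conv_bn(x, conv, bn, training=None):
    # Apply a BinaryConv2D, then its paired NormalBatchNormalization,
    # forwarding the conv kernel exactly as the hand-written call() does.
    x = conv(x)
    return bn(x, conv_weights=conv.weights[0].value(), training=training)

# e.g. inside call():
#   x = conv_bn(x, self.block1_conv1, self.block1_bn1, training)
#   x = conv_bn(x, self.block1_conv2, self.block1_bn2, training)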
Riptide-master/riptide/models/squeezenet.py
import tensorflow as tf from riptide.binary import binary_layers as nn bnmomemtum=0.9 class SqueezeNet(tf.keras.Model): def __init__(self, classes=1000): super(SqueezeNet, self).__init__() self.classes = classes self.c0 = nn.NormalConv2D(kernel_size=7, strides=4, filters=96, padding='same', activation='relu') self.b0 = nn.NormalBatchNormalization(momentum=bnmomemtum) #self.mp0 = nn.MaxPool2D(pool_size=2) self.enter_int = nn.EnterInteger(scale=1.0) # Fire 1 self.f1c1 = nn.BinaryConv2D(filters=32, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f1b1 = nn.BatchNormalization(self.f1c1, momentum=bnmomemtum) self.f1c2 = nn.BinaryConv2D(filters=64, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f1b2 = nn.BatchNormalization(self.f1c2, momentum=bnmomemtum) self.f1c3 = nn.BinaryConv2D(filters=64, kernel_size=3, activation='relu', padding='same', use_bias=False) self.f1b3 = nn.BatchNormalization(self.f1c3, momentum=bnmomemtum) self.f1concat = tf.keras.layers.Concatenate(axis=-1) # Fire 2 self.f2c1 = nn.BinaryConv2D(filters=32, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f2b1 = nn.BatchNormalization(self.f2c1, momentum=bnmomemtum) self.f2c2 = nn.BinaryConv2D(filters=64, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f2b2 = nn.BatchNormalization(self.f2c2, momentum=bnmomemtum) self.f2c3 = nn.BinaryConv2D(filters=64, kernel_size=3, activation='relu', padding='same', use_bias=False) self.f2b3 = nn.BatchNormalization(self.f2c3, momentum=bnmomemtum) self.f2concat = tf.keras.layers.Concatenate(axis=-1) # Fire 3 self.f3c1 = nn.BinaryConv2D(filters=32, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f3b1 = nn.BatchNormalization(self.f3c1, momentum=bnmomemtum) self.f3c2 = nn.BinaryConv2D(filters=128, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f3b2 = nn.BatchNormalization(self.f3c2, momentum=bnmomemtum) self.f3c3 = nn.BinaryConv2D(filters=128, kernel_size=3, activation='relu', padding='same', use_bias=False) self.f3b3 = nn.BatchNormalization(self.f3c3, momentum=bnmomemtum) self.f3concat = tf.keras.layers.Concatenate(axis=-1) self.mp3 = tf.keras.layers.MaxPooling2D(pool_size=2) # Fire 4 self.f4c1 = nn.BinaryConv2D(filters=32, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f4b1 = nn.BatchNormalization(self.f4c1, momentum=bnmomemtum) self.f4c2 = nn.BinaryConv2D(filters=128, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f4b2 = nn.BatchNormalization(self.f4c2, momentum=bnmomemtum) self.f4c3 = nn.BinaryConv2D(filters=128, kernel_size=3, activation='relu', padding='same', use_bias=False) self.f4b3 = nn.BatchNormalization(self.f4c3, momentum=bnmomemtum) self.f4concat = tf.keras.layers.Concatenate(axis=-1) # Fire 5 self.f5c1 = nn.BinaryConv2D(filters=64, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f5b1 = nn.BatchNormalization(self.f5c1, momentum=bnmomemtum) self.f5c2 = nn.BinaryConv2D(filters=192, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f5b2 = nn.BatchNormalization(self.f5c2, momentum=bnmomemtum) self.f5c3 = nn.BinaryConv2D(filters=192, kernel_size=3, activation='relu', padding='same', use_bias=False) self.f5b3 = nn.BatchNormalization(self.f5c3, momentum=bnmomemtum) self.f5concat = tf.keras.layers.Concatenate(axis=-1) # Fire 6 self.f6c1 = nn.BinaryConv2D(filters=64, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f6b1 = 
nn.BatchNormalization(self.f6c1, momentum=bnmomemtum) self.f6c2 = nn.BinaryConv2D(filters=192, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f6b2 = nn.BatchNormalization(self.f6c2, momentum=bnmomemtum) self.f6c3 = nn.BinaryConv2D(filters=192, kernel_size=3, activation='relu', padding='same', use_bias=False) self.f6b3 = nn.BatchNormalization(self.f6c3, momentum=bnmomemtum) self.f6concat = tf.keras.layers.Concatenate(axis=-1) # Fire 7 self.f7c1 = nn.BinaryConv2D(filters=64, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f7b1 = nn.BatchNormalization(self.f7c1, momentum=bnmomemtum) self.f7c2 = nn.BinaryConv2D(filters=256, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f7b2 = nn.BatchNormalization(self.f7c2, momentum=bnmomemtum) self.f7c3 = nn.BinaryConv2D(filters=256, kernel_size=3, activation='relu', padding='same', use_bias=False) self.f7b3 = nn.BatchNormalization(self.f7c3, momentum=bnmomemtum) self.f7concat = tf.keras.layers.Concatenate(axis=-1) self.mp7 = tf.keras.layers.MaxPooling2D(pool_size=2) # Fire 8 self.f8c1 = nn.BinaryConv2D(filters=64, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f8b1 = nn.BatchNormalization(self.f8c1, momentum=bnmomemtum) self.f8c2 = nn.BinaryConv2D(filters=256, kernel_size=1, activation='relu', padding='same', use_bias=False) self.f8b2 = nn.BatchNormalization(self.f8c2, momentum=bnmomemtum) self.f8c3 = nn.BinaryConv2D(filters=256, kernel_size=3, activation='relu', padding='same', use_bias=False) self.f8b3 = nn.BatchNormalization(self.f8c3, momentum=bnmomemtum) self.f8concat = tf.keras.layers.Concatenate(axis=-1) self.exit_int = nn.ExitInteger() # Output self.avgpool = tf.keras.layers.GlobalAveragePooling2D() self.classifier = tf.keras.layers.Dense(1000, activation='softmax') def call(self, x, training=None): y = self.c0(x) y = self.b0(y, training=training) #y = self.mp0(y) y = self.enter_int(y) # Fire 1 y = self.f1c1(y) y = self.f1b1(y, training=training) y1x = self.f1c2(y) y1x = self.f1b2(y1x, training=training) y3x = self.f1c3(y) y3x = self.f1b3(y3x, training=training) y = self.f1concat([y1x, y3x]) # Fire 2 y = self.f2c1(y) y = self.f2b1(y, training=training) y1x = self.f2c2(y) y1x = self.f2b2(y1x, training=training) y3x = self.f2c3(y) y3x = self.f2b3(y3x, training=training) y = self.f2concat([y1x, y3x]) # Fire 3 y = self.f3c1(y) y = self.f3b1(y, training=training) y1x = self.f3c2(y) y1x = self.f3b2(y1x, training=training) y3x = self.f3c3(y) y3x = self.f3b3(y3x, training=training) y = self.f3concat([y1x, y3x]) y = self.mp3(y) # Fire 4 y = self.f4c1(y) y = self.f4b1(y, training=training) y1x = self.f4c2(y) y1x = self.f4b2(y1x, training=training) y3x = self.f4c3(y) y3x = self.f4b3(y3x, training=training) y = self.f4concat([y1x, y3x]) # Fire 5 y = self.f5c1(y) y = self.f5b1(y, training=training) y1x = self.f5c2(y) y1x = self.f5b2(y1x, training=training) y3x = self.f5c3(y) y3x = self.f5b3(y3x, training=training) y = self.f5concat([y1x, y3x]) # Fire 6 y = self.f6c1(y) y = self.f6b1(y, training=training) y1x = self.f6c2(y) y1x = self.f6b2(y1x, training=training) y3x = self.f6c3(y) y3x = self.f6b3(y3x, training=training) y = self.f6concat([y1x, y3x]) # Fire 7 y = self.f7c1(y) y = self.f7b1(y, training=training) y1x = self.f7c2(y) y1x = self.f7b2(y1x, training=training) y3x = self.f7c3(y) y3x = self.f7b3(y3x, training=training) y = self.f7concat([y1x, y3x]) y = self.mp7(y) # Fire 8 y = self.f8c1(y) y = self.f8b1(y, training=training) y1x = self.f8c2(y) y1x = 
self.f8b2(y1x, training=training) y3x = self.f8c3(y) y3x = self.f8b3(y3x, training=training) y = self.f8concat([y1x, y3x]) y = self.exit_int(y) y = self.avgpool(y) y = self.classifier(y) return y
8,503
45.217391
114
py
Riptide
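Each "Fire N" section above follows the usual SqueezeNet pattern: a 1x1 squeeze convolution, then parallel 1x1 and 3x3 expand convolutions whose outputs are concatenated. A hypothetical helper mirroring those calls (illustrative only, not part of the library) makes the repetition explicit:

def fire(y, squeeze, bn_s, expand1, bn_1, expand3, bn_3, concat, training=None):
    # Squeeze with a 1x1 conv, expand with parallel 1x1/3x3 convs, concatenate.
    y = squeeze(y)
    y = bn_s(y, training=training)
    y1 = bn_1(expand1(y), training=training)
    y3 = bn_3(expand3(y), training=training)
    return concat([y1, y3])

# e.g.: y = fire(y, self.f1c1, self.f1b1, self.f1c2, self.f1b2,
#                self.f1c3, self.f1b3, self.f1concat, training)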
Riptide-master/riptide/models/vggnet.py
import os import tensorflow as tf from riptide.binary import binary_layers as nn class vggnet(tf.keras.Model): def __init__(self, classes=1000): super(vggnet, self).__init__() self.conv1 = nn.NormalConv2D( filters=96, kernel_size=7, strides=2, padding='same', activation=None, use_bias=False) self.pool1 = nn.NormalMaxPool2D(pool_size=2, strides=2) self.bn1 = nn.NormalBatchNormalization(center=False, scale=False) self.quantize = nn.EnterInteger(1.0) self.conv2 = nn.BinaryConv2D( filters=256, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False) self.bn2 = nn.BatchNormalization(self.conv2) self.conv3 = nn.BinaryConv2D( filters=256, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False) self.bn3 = nn.BatchNormalization(self.conv3) self.conv4 = nn.BinaryConv2D( filters=256, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False) self.pool4 = nn.MaxPool2D(pool_size=2, strides=2) self.bn4 = nn.BatchNormalization(self.conv4) self.conv5 = nn.BinaryConv2D( filters=512, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False) self.bn5 = nn.BatchNormalization(self.conv5) self.conv6 = nn.BinaryConv2D( filters=512, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False) self.bn6 = nn.BatchNormalization(self.conv6) self.conv7 = nn.BinaryConv2D( filters=512, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False) self.pool7 = nn.MaxPool2D(pool_size=2, strides=2) self.bn7 = nn.BatchNormalization(self.conv7) self.conv8 = nn.BinaryConv2D( filters=512, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False) self.bn8 = nn.BatchNormalization(self.conv8) self.conv9 = nn.BinaryConv2D( filters=512, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False) self.bn9 = nn.BatchNormalization(self.conv9) self.conv10 = nn.BinaryConv2D( filters=512, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False) self.pool10 = nn.MaxPool2D(pool_size=2, strides=2) self.bn10 = nn.BatchNormalization(self.conv10) self.flatten = nn.Flatten() #self.exit_int = nn.ExitInteger() #self.dense1 = nn.NormalDense(4096, activation='relu') #self.bn11 = nn.NormalBatchNormalization() #self.dense2 = nn.NormalDense(4096, activation='relu') #self.bn12 = nn.NormalBatchNormalization() ##self.scalu = nn.Scalu() #self.dense3 = nn.NormalDense(classes) #self.softmax = nn.Activation('softmax') def call(self, inputs, training=None, debug=False): layers = [] with tf.name_scope('unbinarized'): x = self.conv1(inputs) layers.append(x) x = self.pool1(x) layers.append(x) x = self.bn1(x, training=training) layers.append(x) x = self.quantize(x) layers.append(x) x = self.conv2(x) layers.append(x) x = self.bn2( x, training=training) layers.append(x) x = self.conv3(x) layers.append(x) x = self.bn3( x, training=training) layers.append(x) x = self.conv4(x) layers.append(x) x = self.pool4(x) layers.append(x) x = self.bn4( x, training=training) layers.append(x) x = self.conv5(x) layers.append(x) x = self.bn5( x, training=training) layers.append(x) x = self.conv6(x) layers.append(x) x = self.bn6( x, training=training) layers.append(x) x = self.conv7(x) layers.append(x) x = self.pool7(x) layers.append(x) x = self.bn7( x, training=training) layers.append(x) x = self.conv8(x) layers.append(x) x = self.bn8( x, training=training) layers.append(x) x = self.conv9(x) layers.append(x) x = self.bn9( x, training=training) layers.append(x) x = self.conv10(x) layers.append(x) x = self.pool10(x) 
layers.append(x) x = self.bn10( x, training=training) layers.append(x) x = self.flatten(x) layers.append(x) #x = self.exit_int(x) #x = self.dense1(x) #layers.append(x) #x = self.bn11( # x, training=training) #layers.append(x) #x = self.dense2(x) #layers.append(x) #x = self.bn12( # x, training=training) #layers.append(x) #x = self.dense3(x) #layers.append(x) ##x = self.scalu(x) ##layers.append(x) #x = self.softmax(x) #tf.compat.v1.summary.histogram('output', x) if debug: return layers else: return x
5,825
27.419512
73
py
Riptide
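Because the dense head is commented out, this binarized vggnet returns flattened pooled features rather than class scores. A hedged sketch of attaching a plain Keras classifier on top; the wrapper class, the head, and its sizes are assumptions, not part of the original model.

import tensorflow as tf

class VGGNetWithHead(tf.keras.Model):
    def __init__(self, backbone, classes=1000):
        super().__init__()
        self.backbone = backbone                        # the binarized vggnet above
        self.fc1 = tf.keras.layers.Dense(4096, activation='relu')
        self.fc2 = tf.keras.layers.Dense(classes)       # logits; add softmax if needed

    def call(self, x, training=None):
        feats = self.backbone(x, training=training)     # flattened binary features
        return self.fc2(self.fc1(feats))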
Riptide-master/riptide/models/alexnet_normal.py
import os import tensorflow as tf from riptide.binary import binary_layers as nn class alexnet(tf.keras.Model): def __init__(self, classes=1000): super(alexnet, self).__init__() self.conv1 = nn.NormalConv2D( filters=64, kernel_size=11, strides=4, padding='same', activation='relu', use_bias=True) self.pool1 = nn.NormalMaxPool2D(pool_size=2, strides=2) self.bn1 = nn.NormalBatchNormalization(center=False, scale=False) self.conv2 = nn.NormalConv2D( filters=192, kernel_size=5, strides=1, padding='same', activation='relu', use_bias=True) self.pool2 = nn.NormalMaxPool2D(pool_size=2, strides=2) self.bn2 = nn.NormalBatchNormalization() self.conv3 = nn.NormalConv2D( filters=384, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=True) self.bn3 = nn.NormalBatchNormalization() self.conv4 = nn.NormalConv2D( filters=384, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=True) self.bn4 = nn.NormalBatchNormalization() self.conv5 = nn.NormalConv2D( filters=256, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=True) self.pool5 = nn.NormalMaxPool2D(pool_size=2, strides=2) self.bn5 = nn.NormalBatchNormalization() self.flatten = nn.Flatten() self.dense6 = nn.NormalDense(4096, use_bias=True, activation='relu') self.bn6 = nn.NormalBatchNormalization() self.dense7 = nn.NormalDense(4096, use_bias=True, activation='relu') self.bn7 = nn.NormalBatchNormalization() self.dense8 = nn.NormalDense(classes, use_bias=True) #self.scalu = nn.Scalu() self.softmax = nn.Activation('softmax') def call(self, inputs, training=None): x = self.conv1(inputs) x = self.pool1(x) x = self.bn1(x) x = self.conv2(x) x = self.pool2(x) x = self.bn2(x, training=training) x = self.conv3(x) x = self.bn3(x, training=training) x = self.conv4(x) x = self.bn4(x, training=training) x = self.conv5(x) x = self.pool5(x) x = self.bn5(x, training=training) x = self.flatten(x) x = self.dense6(x) x = self.bn6(x, training=training) x = self.dense7(x) x = self.bn7(x, training=training) x = self.dense8(x) #x = self.scalu(x) x = self.softmax(x) return x
2,810
28.589474
76
py
Riptide
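A hedged training sketch for the full-precision AlexNet above, assuming the Normal* wrappers from riptide.binary.binary_layers act as ordinary Keras layers under compile()/fit(); the optimizer, data, and batch size are placeholders.

import tensorflow as tf
from riptide.models.alexnet_normal import alexnet      # assumed import path

model = alexnet(classes=1000)
model.compile(optimizer=tf.keras.optimizers.SGD(0.01, momentum=0.9),
              loss='sparse_categorical_crossentropy',  # model already ends in softmax
              metrics=['accuracy'])

# One step on random data, just to exercise the graph.
images = tf.random.uniform([4, 224, 224, 3])
labels = tf.random.uniform([4], maxval=1000, dtype=tf.int32)
model.fit(images, labels, epochs=1, batch_size=4)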
Riptide-master/riptide/models/cifar_resnet.py
import os import tensorflow as tf import tensorflow.keras.layers as nn #from tensorflow.keras.models import Sequential from riptide.utils.sequential import forward_layer_list def _conv3x3(channels, stride): return nn.Conv2D( channels, kernel_size=3, strides=stride, padding="same", use_bias=False) class CIFARBasicBlockV1(tf.keras.Model): """BasicBlock V1 from `"Deep Residual Learning for Image Recognition" <http://arxiv.org/abs/1512.03385>`_ paper. This is used for ResNet V1 for 18, 34 layers. Parameters ---------- channels : int Number of output channels. stride : int Stride size. downsample : bool, default False Whether to downsample the input. """ def __init__(self, channels, stride, downsample=False, **kwargs): super(CIFARBasicBlockV1, self).__init__(**kwargs) self.body = [] if downsample: self.downsample = [] else: self.downsample = None self.body.append(_conv3x3(channels, stride)) self.body.append(nn.BatchNormalization()) self.body.append(nn.Activation('relu')) self.body.append(_conv3x3(channels, 1)) self.body.append(nn.BatchNormalization()) if self.downsample is not None: self.downsample.append( nn.Conv2D( channels, kernel_size=1, strides=stride, use_bias=False)) self.downsample.append(nn.BatchNormalization()) def call(self, x): residual = x x = forward_layer_list(x, self.body) if self.downsample is not None: residual = forward_layer_list(residual, self.downsample) x = residual + x return tf.keras.activations.relu(x) class CIFARBasicBlockV2(tf.keras.Model): """BasicBlock V2 from `"Identity Mappings in Deep Residual Networks" <https://arxiv.org/abs/1603.05027>`_ paper. This is used for ResNet V2 for 18, 34 layers. Parameters ---------- channels : int Number of output channels. stride : int Stride size. downsample : bool, default False Whether to downsample the input. in_channels : int, default 0 Number of input channels. Default is 0, to infer from the graph. """ def __init__(self, channels, stride, downsample=False, **kwargs): super(CIFARBasicBlockV2, self).__init__(**kwargs) self.body = [] if downsample: self.downsample = [] else: self.downsample = None self.body.append(nn.BatchNormalization) self.body.append(nn.Activation('relu')) self.body.append(_conv3x3(channels, stride)) self.body.append(nn.BatchNormalization) self.body.append(nn.Activation('relu')) self.body.append(_conv3x3(channels, 1)) if self.downsample is not None: self.downsample.append( nn.Conv2D( channels, kernel_size=1, strides=stride, use_bias=False, activation='relu')) def call(self, x): residual = x x = forward_layer_list(x, self.body) if self.downsample is not None: residual = forward_layer_list(residual, self.downsample) return x + residual class CIFARResNetV1(tf.keras.Model): """ResNet V1 model from `"Deep Residual Learning for Image Recognition" <http://arxiv.org/abs/1512.03385>`_ paper. Parameters ---------- block : HybridBlock Class for the residual block. Options are CIFARBasicBlockV1, BottleneckV1. layers : list of int Numbers of layers in each block channels : list of int Numbers of channels in each block. Length should be one larger than layers list. classes : int, default 10 Number of classification classes. 
""" def __init__(self, block, layers, channels, classes=10, **kwargs): super(CIFARResNetV1, self).__init__(**kwargs) with tf.name_scope("CIFARResNetV1"): self.features = [] self.features.append( nn.Conv2D( channels[0], kernel_size=3, strides=1, padding="same", use_bias=False)) self.features.append(nn.BatchNormalization()) for i, num_layer in enumerate(layers): stride = 1 if i == 0 else 2 self.features.append( self._make_layer( block, num_layer, channels[i + 1], stride, i + 1, in_channels=channels[i])) self.features.append(nn.GlobalAveragePooling2D()) self.output_layer = nn.Dense(classes) def _make_layer(self, block, layers, channels, stride, stage_index, in_channels=0): layer = [] layer.append(block(channels, stride, channels != in_channels)) for _ in range(layers - 1): layer.append(block(channels, 1, False)) return layer def call(self, x): x = forward_layer_list(x, self.features) x = self.output_layer(x) return x class CIFARResNetV2(tf.keras.Model): """ResNet V2 model from `"Identity Mappings in Deep Residual Networks" <https://arxiv.org/abs/1603.05027>`_ paper. Parameters ---------- block : HybridBlock Class for the residual block. Options are CIFARBasicBlockV1, BottleneckV1. layers : list of int Numbers of layers in each block channels : list of int Numbers of channels in each block. Length should be one larger than layers list. classes : int, default 10 Number of classification classes. """ def __init__(self, block, layers, channels, classes=10, **kwargs): super(CIFARResNetV2, self).__init__(**kwargs) assert len(layers) == len(channels) - 1 self.features = [] self.features.append(nn.BatchNormalization(scale=False, center=False)) in_channels = channels[0] for i, num_layer in enumerate(layers): stride = 1 if i == 0 else 2 self.features.append( self._make_layer( block, num_layer, channels[i + 1], stride, i + 1, in_channels=in_channels)) in_channels = channels[i + 1] self.features.append(nn.BatchNormalization()) self.features.append(nn.Activation('relu')) self.features.append(nn.GlobalAveragePooling2D()) self.features.append(nn.Flatten()) self.output_layer = nn.Dense(classes) def _make_layer(self, block, layers, channels, stride, stage_index, in_channels=0): layer = [] layer.append(block(channels, stride, channels != in_channels)) for _ in range(layers - 1): layer.append(block(channels, 1, False)) return layer def call(self, x): x = forward_layer_list(x, self.features) x = self.output_layer(x) return x # Specification resnet_net_versions = [CIFARResNetV1, CIFARResNetV2] resnet_block_versions = [CIFARBasicBlockV1, CIFARBasicBlockV2] def _get_resnet_spec(num_layers): assert (num_layers - 2) % 6 == 0 n = (num_layers - 2) // 6 channels = [16, 16, 32, 64] layers = [n] * (len(channels) - 1) return layers, channels # Constructor def get_cifar_resnet(version, num_layers, **kwargs): r"""ResNet V1 model from `"Deep Residual Learning for Image Recognition" <http://arxiv.org/abs/1512.03385>`_ paper. ResNet V2 model from `"Identity Mappings in Deep Residual Networks" <https://arxiv.org/abs/1603.05027>`_ paper. Parameters ---------- version : int Version of ResNet. Options are 1, 2. num_layers : int Numbers of layers. Needs to be an integer in the form of 6*n+2, e.g. 20, 56, 110, 164. 
""" layers, channels = _get_resnet_spec(num_layers) resnet_class = resnet_net_versions[version - 1] block_class = resnet_block_versions[version - 1] net = resnet_class(block_class, layers, channels, **kwargs) return net def cifar_resnet20_v1(**kwargs): r"""ResNet-20 V1 model for CIFAR10 from `"Deep Residual Learning for Image Recognition" <http://arxiv.org/abs/1512.03385>`_ paper. Parameters ---------- pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters. """ return get_cifar_resnet(1, 20, **kwargs) def cifar_resnet56_v1(**kwargs): r"""ResNet-56 V1 model for CIFAR10 from `"Deep Residual Learning for Image Recognition" <http://arxiv.org/abs/1512.03385>`_ paper. Parameters ---------- pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters. """ return get_cifar_resnet(1, 56, **kwargs) def cifar_resnet110_v1(**kwargs): r"""ResNet-110 V1 model for CIFAR10 from `"Deep Residual Learning for Image Recognition" <http://arxiv.org/abs/1512.03385>`_ paper. Parameters ---------- pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters. """ return get_cifar_resnet(1, 110, **kwargs) def cifar_resnet20_v2(**kwargs): r"""ResNet-20 V2 model for CIFAR10 from `"Identity Mappings in Deep Residual Networks" <https://arxiv.org/abs/1603.05027>`_ paper. Parameters ---------- pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters. """ return get_cifar_resnet(2, 20, **kwargs) def cifar_resnet56_v2(**kwargs): r"""ResNet-56 V2 model for CIFAR10 from `"Identity Mappings in Deep Residual Networks" <https://arxiv.org/abs/1603.05027>`_ paper. Parameters ---------- pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters. """ return get_cifar_resnet(2, 56, **kwargs) def cifar_resnet110_v2(**kwargs): r"""ResNet-110 V2 model for CIFAR10 from `"Identity Mappings in Deep Residual Networks" <https://arxiv.org/abs/1603.05027>`_ paper. Parameters ---------- pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters. """ return get_cifar_resnet(2, 110, **kwargs)
11,875
30.839142
94
py
Riptide
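The depth arithmetic in _get_resnet_spec requires num_layers = 6n + 2, yielding three stages of n blocks over channel widths [16, 32, 64]. A short hedged example, assuming the module is importable as shown and that forward_layer_list walks the nested layer lists as the code implies:

import tensorflow as tf
from riptide.models.cifar_resnet import cifar_resnet20_v1, _get_resnet_spec

layers, channels = _get_resnet_spec(20)     # n = (20 - 2) // 6 = 3
print(layers, channels)                     # [3, 3, 3] [16, 16, 32, 64]

model = cifar_resnet20_v1(classes=10)
logits = model(tf.zeros([4, 32, 32, 3]))    # CIFAR-sized input
print(logits.shape)                         # expected: (4, 10)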
Riptide-master/riptide/models/vgg11.py
import os import tensorflow as tf from riptide.binary import binary_layers as nn #from tensorflow.keras.models import Sequential from riptide.utils.sequential import forward_layer_list class vgg11(tf.keras.Model): def __init__(self, classes=1000): super(vgg11, self).__init__() # Set up configurable maxpool or stride dimension reduction. self.scope = nn.Config.current use_maxpool = self.scope.use_maxpool if use_maxpool: reduce_stride = 1 else: reduce_stride = 2 self.conv1 = nn.NormalConv2D( filters=64, kernel_size=3, strides=1, padding='same', activation='relu') self.bn1 = nn.NormalBatchNormalization() self.pool1 = nn.NormalMaxPool2D(pool_size=2, strides=2) self.quantize = nn.EnterInteger(1.0) self.conv2 = nn.BinaryConv2D( filters=128, kernel_size=3, strides=reduce_stride, padding='same', activation='relu', use_bias=False) self.bn2 = nn.BatchNormalization() self.pool2 = nn.MaxPool2D(pool_size=2, strides=2) self.conv3 = nn.BinaryConv2D( filters=256, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False) self.bn3 = nn.BatchNormalization() self.conv4 = nn.BinaryConv2D( filters=256, kernel_size=3, strides=reduce_stride, padding='same', activation='relu', use_bias=False) self.bn4 = nn.BatchNormalization() self.pool3 = nn.MaxPool2D(pool_size=2, strides=2) self.conv5 = nn.BinaryConv2D( filters=512, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False) self.bn5 = nn.BatchNormalization() self.conv6 = nn.BinaryConv2D( filters=512, kernel_size=3, strides=reduce_stride, padding='same', activation='relu', use_bias=False) self.bn6 = nn.BatchNormalization() self.pool4 = nn.MaxPool2D(pool_size=2, strides=2) self.conv7 = nn.BinaryConv2D( filters=512, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=False) self.bn7 = nn.BatchNormalization() self.conv8 = nn.BinaryConv2D( filters=512, kernel_size=3, strides=reduce_stride, padding='same', activation='relu', use_bias=False) self.bn8 = nn.BatchNormalization() self.pool5 = nn.MaxPool2D(pool_size=2, strides=2) self.avgpool = nn.GlobalAveragePooling2D() self.classifier = nn.BinaryDense(classes, use_bias=False) self.scalu = nn.Scalu() self.softmax = nn.Activation('softmax') def call(self, inputs, training=None, debug=False): layers = [] with tf.name_scope('unbinarized'): x = self.conv1(inputs) layers.append(x) x = self.bn1(x, training=training) layers.append(x) x = self.pool1(x) layers.append(x) # When running in binary, need to reduce spread of normal distribution x = self.quantize(x) layers.append(x) # Continue with binary layers. 
x = self.conv2(x) layers.append(x) x = self.bn2( x, conv_weights=self.conv2.weights[0].value(), training=training) layers.append(x) x = self.pool2(x) if self.scope.use_maxpool: layers.append(x) x = self.conv3(x) layers.append(x) x = self.bn3( x, conv_weights=self.conv3.weights[0].value(), training=training) layers.append(x) x = self.conv4(x) layers.append(x) x = self.bn4( x, conv_weights=self.conv4.weights[0].value(), training=training) layers.append(x) x = self.pool3(x) if self.scope.use_maxpool: layers.append(x) x = self.conv5(x) layers.append(x) x = self.bn5( x, conv_weights=self.conv5.weights[0].value(), training=training) layers.append(x) x = self.conv6(x) layers.append(x) x = self.bn6( x, conv_weights=self.conv6.weights[0].value(), training=training) layers.append(x) x = self.pool4(x) if self.scope.use_maxpool: layers.append(x) x = self.conv7(x) layers.append(x) x = self.bn7( x, conv_weights=self.conv7.weights[0].value(), training=training) layers.append(x) x = self.conv8(x) layers.append(x) x = self.bn8( x, conv_weights=self.conv8.weights[0].value(), training=training) layers.append(x) x = self.pool5(x) if self.scope.use_maxpool: layers.append(x) x = self.avgpool(x) layers.append(x) #with tf.name_scope('unbinarized'): x = self.classifier(x) layers.append(x) x = self.scalu(x) layers.append(x) x = self.softmax(x) layers.append(x) if debug: return layers else: return x
5,477
30.66474
78
py
Riptide
Riptide-master/riptide/models/resnetv1b.py
"""ResNetV1bs, implemented in tf Keras.""" # pylint: disable=arguments-differ,unused-argument,missing-docstring,dangerous-default-value import os import tensorflow as tf import tensorflow.keras.layers as nn #from tensorflow.keras.models import Sequential from riptide.utils.sequential import forward class BasicBlockV1b(tf.keras.Model): """ResNetV1b BasicBlockV1b """ expansion = 1 def __init__(self, planes, strides=1, dilation=1, downsample=None, previous_dilation=1, norm_layer=None, norm_kwargs={}, data_format='channels_last', **kwargs): super(BasicBlockV1b, self).__init__() self.conv1 = nn.Conv2D( filters=planes, kernel_size=3, strides=strides, padding='same', dilation_rate=dilation, use_bias=False, data_format=data_format) self.bn1 = norm_layer(**norm_kwargs) self.relu1 = nn.Activation('relu') self.conv2 = nn.Conv2D( filters=planes, kernel_size=3, strides=1, padding='same', dilation_rate=previous_dilation, use_bias=False, data_format=data_format) self.bn2 = norm_layer(**norm_kwargs) self.relu2 = nn.Activation('relu') self.downsample = downsample self.strides = strides def call(self, x): residual = x out = forward(x, self.conv1) out = forward(out, self.bn1) out = forward(out, self.relu1) out = forward(out, self.conv2) out = forward(out, self.bn2) if self.downsample is not None: residual = forward(x, self.downsample) out = out + residual out = forward(out, self.relu2) return out class BottleneckV1b(tf.keras.Model): """ResNetV1b BottleneckV1b """ # pylint: disable=unused-argument expansion = 4 def __init__(self, planes, strides=1, dilation=1, downsample=None, previous_dilation=1, norm_layer=None, norm_kwargs={}, last_gamma=False, data_format='channels_last', **kwargs): super(BottleneckV1b, self).__init__() self.conv1 = nn.Conv2D( filters=planes, kernel_size=1, use_bias=False, data_format=data_format) self.bn1 = norm_layer(**norm_kwargs) self.relu1 = nn.Activation('relu') self.conv2 = nn.Conv2D( filters=planes, kernel_size=3, strides=strides, padding='same', dilation_rate=dilation, use_bias=False, data_format=data_format) self.bn2 = norm_layer(**norm_kwargs) self.relu2 = nn.Activation('relu') self.conv3 = nn.Conv2D( filters=planes * 4, kernel_size=1, use_bias=False, data_format=data_format) if not last_gamma: self.bn3 = norm_layer(**norm_kwargs) else: self.bn3 = norm_layer(gamma_initializer='zeros', **norm_kwargs) self.relu3 = nn.Activation('relu') self.downsample = downsample self.dilation = dilation self.strides = strides def call(self, x): residual = x out = forward(x, self.conv1) out = forward(out, self.bn1) out = forward(out, self.relu1) out = forward(out, self.conv2) out = forward(out, self.bn2) out = forward(out, self.relu2) out = forward(out, self.conv3) out = forward(out, self.bn3) if self.downsample is not None: residual = forward(x, self.downsample) out = out + residual out = forward(out, self.relu3) return out class ResNetV1b(tf.keras.Model): """ Pre-trained ResNetV1b Model, which preduces the strides of 8 featuremaps at conv5. Parameters ---------- block : Block Class for the residual block. Options are BasicBlockV1, BottleneckV1. layers : list of int Numbers of layers in each block classes : int, default 1000 Number of classification classes. dilated : bool, default False Applying dilation strategy to pretrained ResNet yielding a stride-8 model, typically used in Semantic Segmentation. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.nn.BatchNorm`; for Synchronized Cross-GPU BachNormalization). 
last_gamma : bool, default False Whether to initialize the gamma of the last BatchNorm layer in each bottleneck to zero. deep_stem : bool, default False Whether to replace the 7x7 conv1 with 3 3x3 convolution layers. avg_down : bool, default False Whether to use average pooling for projection skip connection between stages/downsample. final_drop : float, default 0.0 Dropout ratio before the final classification layer. Reference: - He, Kaiming, et al. "Deep residual learning for image recognition." Proceedings of the IEEE conference on computer vision and pattern recognition. 2016. - Yu, Fisher, and Vladlen Koltun. "Multi-scale context aggregation by dilated convolutions." """ # pylint: disable=unused-variable def __init__(self, block, layers, classes=1000, data_format='channels_last', dilated=False, norm_layer=nn.BatchNormalization, norm_kwargs={}, last_gamma=False, deep_stem=False, stem_width=32, avg_down=False, final_drop=0.0, name_prefix='', **kwargs): self.inplanes = stem_width * 2 if deep_stem else 64 self.data_format = data_format super(ResNetV1b, self).__init__(name=name_prefix) self.norm_kwargs = norm_kwargs with tf.name_scope(self.name): if not deep_stem: self.conv1 = nn.Conv2D( filters=64, kernel_size=7, strides=2, padding='same', use_bias=False, data_format=data_format) else: self.conv1 = ['conv1'] self.conv1.append( nn.Conv2D( filters=stem_width, kernel_size=3, strides=2, padding='same', use_bias=False, data_format=data_format)) self.conv1.append(norm_layer(**norm_kwargs)) self.conv1.append(nn.Activation('relu')) self.conv1.append( nn.Conv2D( filters=stem_width, kernel_size=3, strides=1, padding='same', use_bias=False, data_format=data_format)) self.conv1.append(norm_layer(**norm_kwargs)) self.conv1.append(nn.Activation('relu')) self.conv1.append( nn.Conv2D( filters=stem_width * 2, kernel_size=3, strides=1, padding='same', use_bias=False, data_format=data_format)) self.bn1 = norm_layer(**norm_kwargs) self.relu = nn.Activation('relu') self.maxpool = nn.MaxPool2D( pool_size=3, strides=2, padding='same', data_format=data_format) self.layer1 = self._make_layer( 1, block, 64, layers[0], avg_down=avg_down, norm_layer=norm_layer, last_gamma=last_gamma, data_format=data_format) self.layer2 = self._make_layer( 2, block, 128, layers[1], strides=2, avg_down=avg_down, norm_layer=norm_layer, last_gamma=last_gamma, data_format=data_format) if dilated: self.layer3 = self._make_layer( 3, block, 256, layers[2], strides=1, dilation=2, avg_down=avg_down, norm_layer=norm_layer, last_gamma=last_gamma, data_format=data_format) self.layer4 = self._make_layer( 4, block, 512, layers[3], strides=1, dilation=4, avg_down=avg_down, norm_layer=norm_layer, last_gamma=last_gamma, data_format=data_format) else: self.layer3 = self._make_layer( 3, block, 256, layers[2], strides=2, avg_down=avg_down, norm_layer=norm_layer, last_gamma=last_gamma, data_format=data_format) self.layer4 = self._make_layer( 4, block, 512, layers[3], strides=2, avg_down=avg_down, norm_layer=norm_layer, last_gamma=last_gamma, data_format=data_format) self.avgpool = nn.GlobalAveragePooling2D(data_format=data_format) self.flat = nn.Flatten() self.drop = None if final_drop > 0.0: self.drop = nn.Dropout(final_drop) self.fc = nn.Dense(units=classes) def _make_layer(self, stage_index, block, planes, blocks, strides=1, dilation=1, avg_down=False, norm_layer=None, last_gamma=False, data_format='channels_last'): downsample = None if strides != 1 or self.inplanes != planes * block.expansion: downsample = ['down%d' % stage_index] if avg_down: if 
dilation == 1: downsample.append( nn.AveragePooling2D( pool_size=strides, strides=strides, padding='same', data_format=data_format)) else: downsample.append( nn.AveragePooling2D( pool_size=1, strides=1, padding='same', data_format=data_format)) downsample.append( nn.Conv2D( filters=planes * block.expansion, kernel_size=1, strides=1, use_bias=False, data_format=data_format)) downsample.append(norm_layer(**self.norm_kwargs)) else: downsample.append( nn.Conv2D( filters=planes * block.expansion, kernel_size=1, strides=strides, use_bias=False, data_format=data_format)) downsample.append(norm_layer(**self.norm_kwargs)) layers = ['layers%d' % stage_index] if dilation in (1, 2): layers.append( block( planes, strides, dilation=1, downsample=downsample, previous_dilation=dilation, norm_layer=norm_layer, norm_kwargs=self.norm_kwargs, last_gamma=last_gamma, data_format=data_format)) elif dilation == 4: layers.append( block( planes, strides, dilation=2, downsample=downsample, previous_dilation=dilation, norm_layer=norm_layer, norm_kwargs=self.norm_kwargs, last_gamma=last_gamma, data_format=data_format)) else: raise RuntimeError("=> unknown dilation size: {}".format(dilation)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append( block( planes, dilation=dilation, previous_dilation=dilation, norm_layer=norm_layer, norm_kwargs=self.norm_kwargs, last_gamma=last_gamma, data_format=data_format)) return layers def call(self, x): if self.data_format == 'channels_first': x = tf.transpose(x, [0, 3, 1, 2]) x = forward(x, self.conv1) x = forward(x, self.bn1) x = forward(x, self.relu) x = forward(x, self.maxpool) x = forward(x, self.layer1) x = forward(x, self.layer2) x = forward(x, self.layer3) x = forward(x, self.layer4) x = forward(x, self.avgpool) x = forward(x, self.flat) if self.drop is not None: x = forward(x, self.drop) x = forward(x, self.fc) return x def resnet18_v1b(**kwargs): """Constructs a ResNetV1b-18 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.BatchNorm`; for Synchronized Cross-GPU BachNormalization). last_gamma : bool, default False Whether to initialize the gamma of the last BatchNorm layer in each bottleneck to zero. """ model = ResNetV1b( BasicBlockV1b, [2, 2, 2, 2], name_prefix='resnetv1b', **kwargs) return model def resnet34_v1b(**kwargs): """Constructs a ResNetV1b-34 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.BatchNorm`; last_gamma : bool, default False Whether to initialize the gamma of the last BatchNorm layer in each bottleneck to zero. """ model = ResNetV1b( BasicBlockV1b, [3, 4, 6, 3], name_prefix='resnetv1b', **kwargs) return model def resnet50_v1b(**kwargs): """Constructs a ResNetV1b-50 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.BatchNorm`; last_gamma : bool, default False Whether to initialize the gamma of the last BatchNorm layer in each bottleneck to zero. 
""" model = ResNetV1b( BottleneckV1b, [3, 4, 6, 3], name_prefix='resnetv1b', **kwargs) return model def resnet101_v1b(**kwargs): """Constructs a ResNetV1b-101 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.BatchNorm`; last_gamma : bool, default False Whether to initialize the gamma of the last BatchNorm layer in each bottleneck to zero. """ model = ResNetV1b( BottleneckV1b, [3, 4, 23, 3], name_prefix='resnetv1b', **kwargs) return model def resnet152_v1b(**kwargs): """Constructs a ResNetV1b-152 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.BatchNorm`; last_gamma : bool, default False Whether to initialize the gamma of the last BatchNorm layer in each bottleneck to zero. """ model = ResNetV1b( BottleneckV1b, [3, 8, 36, 3], name_prefix='resnetv1b', **kwargs) return model def resnet50_v1c(**kwargs): """Constructs a ResNetV1c-50 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`; """ model = ResNetV1b( BottleneckV1b, [3, 4, 6, 3], deep_stem=True, name_prefix='resnetv1c_', **kwargs) return model def resnet101_v1c(**kwargs): """Constructs a ResNetV1c-101 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`; """ model = ResNetV1b( BottleneckV1b, [3, 4, 23, 3], deep_stem=True, name_prefix='resnetv1c_', **kwargs) return model def resnet152_v1c(**kwargs): """Constructs a ResNetV1b-152 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`; """ model = ResNetV1b( BottleneckV1b, [3, 8, 36, 3], deep_stem=True, name_prefix='resnetv1c_', **kwargs) return model def resnet50_v1d(**kwargs): """Constructs a ResNetV1d-50 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`; """ model = ResNetV1b( BottleneckV1b, [3, 4, 6, 3], deep_stem=True, avg_down=True, name_prefix='resnetv1d_', **kwargs) return model def resnet101_v1d(**kwargs): """Constructs a ResNetV1d-50 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`; """ model = ResNetV1b( BottleneckV1b, [3, 4, 23, 3], deep_stem=True, avg_down=True, name_prefix='resnetv1d_', **kwargs) return model def resnet152_v1d(**kwargs): """Constructs a ResNetV1d-50 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. 
norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`; """ model = ResNetV1b( BottleneckV1b, [3, 8, 36, 3], deep_stem=True, avg_down=True, name_prefix='resnetv1d_', **kwargs) return model def resnet50_v1e(**kwargs): """Constructs a ResNetV1e-50 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`; """ model = ResNetV1b( BottleneckV1b, [3, 4, 6, 3], deep_stem=True, avg_down=True, stem_width=64, name_prefix='resnetv1e_', **kwargs) return model def resnet101_v1e(**kwargs): """Constructs a ResNetV1e-50 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`; """ model = ResNetV1b( BottleneckV1b, [3, 4, 23, 3], deep_stem=True, avg_down=True, stem_width=64, name_prefix='resnetv1e_', **kwargs) return model def resnet152_v1e(**kwargs): """Constructs a ResNetV1e-50 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`; """ model = ResNetV1b( BottleneckV1b, [3, 8, 36, 3], deep_stem=True, avg_down=True, stem_width=64, name_prefix='resnetv1e_', **kwargs) return model def resnet50_v1s(**kwargs): """Constructs a ResNetV1s-50 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`; """ model = ResNetV1b( BottleneckV1b, [3, 4, 6, 3], deep_stem=True, stem_width=64, name_prefix='resnetv1s_', **kwargs) return model def resnet101_v1s(**kwargs): """Constructs a ResNetV1s-101 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`; """ model = ResNetV1b( BottleneckV1b, [3, 4, 23, 3], deep_stem=True, stem_width=64, name_prefix='resnetv1s_', **kwargs) return model def resnet152_v1s(**kwargs): """Constructs a ResNetV1s-152 model. Parameters ---------- dilated: bool, default False Whether to apply dilation strategy to ResNetV1b, yilding a stride 8 model. norm_layer : object Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`; """ model = ResNetV1b( BottleneckV1b, [3, 8, 36, 3], deep_stem=True, stem_width=64, name_prefix='resnetv1s_', **kwargs) return model
23,710
31.216033
100
py
Riptide
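A hedged instantiation sketch for the ResNetV1b family above, assuming the constructors are importable and that riptide.utils.sequential.forward handles both single layers and the list-based stems/downsample paths as the code implies. The "d" variant adds the deep 3x3 stem and average-pool downsampling; final_drop enables dropout before the classifier.

import tensorflow as tf
from riptide.models.resnetv1b import resnet50_v1b, resnet50_v1d   # assumed import path

model = resnet50_v1b(classes=1000)
out = model(tf.zeros([1, 224, 224, 3]))     # expected: (1, 1000) logits

model_d = resnet50_v1d(classes=1000, final_drop=0.2)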
Riptide-master/riptide/models/squeezenet_normal.py
import tensorflow as tf import tensorflow.keras.layers as nn bnmomemtum=0.9 class SqueezeNet(tf.keras.Model): def __init__(self, classes=1000): super(SqueezeNet, self).__init__() self.classes = classes self.c0 = tf.keras.layers.Conv2D(kernel_size=7, strides=2, filters=96, padding='same', use_bias=True, activation='relu') self.b0 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.mp0 = tf.keras.layers.MaxPooling2D(pool_size=2) # Fire 1 self.f1c1 = tf.keras.layers.Conv2D(filters=32, kernel_size=1, activation='relu', padding='same') self.f1b1 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.f1c2 = tf.keras.layers.Conv2D(filters=64, kernel_size=1, activation='relu', padding='same') self.f1b2 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.f1c3 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, activation='relu', padding='same') self.f1b3 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.f1concat = tf.keras.layers.Concatenate(axis=-1) # Fire 2 self.f2c1 = tf.keras.layers.Conv2D(filters=32, kernel_size=1, activation='relu', padding='same') self.f2b1 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.f2c2 = tf.keras.layers.Conv2D(filters=64, kernel_size=1, activation='relu', padding='same') self.f2b2 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.f2c3 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, activation='relu', padding='same') self.f2b3 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.f2concat = tf.keras.layers.Concatenate(axis=-1) # Fire 3 self.f3c1 = tf.keras.layers.Conv2D(filters=32, kernel_size=1, activation='relu', padding='same') self.f3b1 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.f3c2 = tf.keras.layers.Conv2D(filters=128, kernel_size=1, activation='relu', padding='same') self.f3b2 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.f3c3 = tf.keras.layers.Conv2D(filters=128, kernel_size=3, activation='relu', padding='same') self.f3b3 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.f3concat = tf.keras.layers.Concatenate(axis=-1) self.mp3 = tf.keras.layers.MaxPooling2D(pool_size=2) # Fire 4 self.f4c1 = tf.keras.layers.Conv2D(filters=32, kernel_size=1, activation='relu', padding='same') self.f4b1 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.f4c2 = tf.keras.layers.Conv2D(filters=128, kernel_size=1, activation='relu', padding='same') self.f4b2 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.f4c3 = tf.keras.layers.Conv2D(filters=128, kernel_size=3, activation='relu', padding='same') self.f4b3 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.f4concat = tf.keras.layers.Concatenate(axis=-1) # Fire 5 self.f5c1 = tf.keras.layers.Conv2D(filters=64, kernel_size=1, activation='relu', padding='same') self.f5b1 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.f5c2 = tf.keras.layers.Conv2D(filters=192, kernel_size=1, activation='relu', padding='same') self.f5b2 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.f5c3 = tf.keras.layers.Conv2D(filters=192, kernel_size=3, activation='relu', padding='same') self.f5b3 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.f5concat = tf.keras.layers.Concatenate(axis=-1) # Fire 6 self.f6c1 = tf.keras.layers.Conv2D(filters=64, kernel_size=1, activation='relu', padding='same') self.f6b1 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.f6c2 = tf.keras.layers.Conv2D(filters=192, kernel_size=1, activation='relu', 
padding='same') self.f6b2 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.f6c3 = tf.keras.layers.Conv2D(filters=192, kernel_size=3, activation='relu', padding='same') self.f6b3 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.f6concat = tf.keras.layers.Concatenate(axis=-1) # Fire 7 self.f7c1 = tf.keras.layers.Conv2D(filters=64, kernel_size=1, activation='relu', padding='same') self.f7b1 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.f7c2 = tf.keras.layers.Conv2D(filters=256, kernel_size=1, activation='relu', padding='same') self.f7b2 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.f7c3 = tf.keras.layers.Conv2D(filters=256, kernel_size=3, activation='relu', padding='same') self.f7b3 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.f7concat = tf.keras.layers.Concatenate(axis=-1) self.mp7 = tf.keras.layers.MaxPooling2D(pool_size=2) # Fire 8 self.f8c1 = tf.keras.layers.Conv2D(filters=64, kernel_size=1, activation='relu', padding='same') self.f8b1 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.f8c2 = tf.keras.layers.Conv2D(filters=256, kernel_size=1, activation='relu', padding='same') self.f8b2 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.f8c3 = tf.keras.layers.Conv2D(filters=256, kernel_size=3, activation='relu', padding='same') self.f8b3 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum) self.f8concat = tf.keras.layers.Concatenate(axis=-1) # Output self.avgpool = tf.keras.layers.GlobalAveragePooling2D() self.classifier = tf.keras.layers.Dense(1000, activation='softmax') def call(self, x, training=None): y = self.c0(x) y = self.b0(y, training=training) y = self.mp0(y) # Fire 1 y = self.f1c1(y) y = self.f1b1(y, training=training) y1x = self.f1c2(y) y1x = self.f1b2(y1x, training=training) y3x = self.f1c3(y) y3x = self.f1b3(y3x, training=training) y = self.f1concat([y1x, y3x]) # Fire 2 y = self.f2c1(y) y = self.f2b1(y, training=training) y1x = self.f2c2(y) y1x = self.f2b2(y1x, training=training) y3x = self.f2c3(y) y3x = self.f2b3(y3x, training=training) y = self.f2concat([y1x, y3x]) # Fire 3 y = self.f3c1(y) y = self.f3b1(y, training=training) y1x = self.f3c2(y) y1x = self.f3b2(y1x, training=training) y3x = self.f3c3(y) y3x = self.f3b3(y3x, training=training) y = self.f3concat([y1x, y3x]) y = self.mp3(y) # Fire 4 y = self.f4c1(y) y = self.f4b1(y, training=training) y1x = self.f4c2(y) y1x = self.f4b2(y1x, training=training) y3x = self.f4c3(y) y3x = self.f4b3(y3x, training=training) y = self.f4concat([y1x, y3x]) # Fire 5 y = self.f5c1(y) y = self.f5b1(y, training=training) y1x = self.f5c2(y) y1x = self.f5b2(y1x, training=training) y3x = self.f5c3(y) y3x = self.f5b3(y3x, training=training) y = self.f5concat([y1x, y3x]) # Fire 6 y = self.f6c1(y) y = self.f6b1(y, training=training) y1x = self.f6c2(y) y1x = self.f6b2(y1x, training=training) y3x = self.f6c3(y) y3x = self.f6b3(y3x, training=training) y = self.f6concat([y1x, y3x]) # Fire 7 y = self.f7c1(y) y = self.f7b1(y, training=training) y1x = self.f7c2(y) y1x = self.f7b2(y1x, training=training) y3x = self.f7c3(y) y3x = self.f7b3(y3x, training=training) y = self.f7concat([y1x, y3x]) y = self.mp7(y) # Fire 8 y = self.f8c1(y) y = self.f8b1(y, training=training) y1x = self.f8c2(y) y1x = self.f8b2(y1x, training=training) y3x = self.f8c3(y) y3x = self.f8b3(y3x, training=training) y = self.f8concat([y1x, y3x]) y = self.avgpool(y) y = self.classifier(y) return y
8,215
44.644444
128
py
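A minimal usage sketch for the subclassed Keras SqueezeNet in the record above, assuming TensorFlow 2.x; the dummy batch, optimizer, and loss are illustrative and not part of the original file.

import tensorflow as tf

# Assumes the SqueezeNet class from the record above is in scope.
model = SqueezeNet(classes=1000)
dummy = tf.random.uniform([2, 224, 224, 3])  # hypothetical batch of two 224x224 RGB images
probs = model(dummy, training=False)         # forward pass builds the layers lazily
print(probs.shape)                           # (2, 1000); the classifier ends in a softmax
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])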
Riptide
Riptide-master/riptide/models/darknet.py
import os
import tensorflow as tf
from riptide.binary import binary_layers as nn
#from tensorflow.keras.models import Sequential
from riptide.utils.sequential import forward_layer_list


class DarkNet(tf.keras.Model):
    def __init__(self):
        super(DarkNet, self).__init__()
        self.conv1 = nn.NormalConv2D(
            filters=16,
            kernel_size=3,
            strides=1,
            padding='same',
            activation='relu',
            use_bias=False)
        self.bn1 = nn.BatchNormalization()
        self.mxp1 = nn.MaxPool2D(pool_size=2, strides=2)

        self.conv2 = nn.Conv2D(
            filters=32,
            kernel_size=3,
            strides=1,
            padding='same',
            activation='relu',
            use_bias=False)
        self.bn2 = nn.BatchNormalization()
        self.mxp2 = nn.MaxPool2D(pool_size=2, strides=2)

        self.conv3 = nn.Conv2D(
            filters=64,
            kernel_size=3,
            strides=1,
            padding='same',
            activation='relu',
            use_bias=False)
        self.bn3 = nn.BatchNormalization()
        self.mxp3 = nn.MaxPool2D(pool_size=2, strides=2)

        self.conv4 = nn.Conv2D(
            filters=128,
            kernel_size=3,
            strides=1,
            padding='same',
            activation='relu',
            use_bias=False)
        self.bn4 = nn.BatchNormalization()
        self.mxp4 = nn.MaxPool2D(pool_size=2, strides=2)

        self.conv5 = nn.Conv2D(
            filters=256,
            kernel_size=3,
            strides=1,
            padding='same',
            activation='relu',
            use_bias=False)
        self.bn5 = nn.BatchNormalization()
        self.mxp5 = nn.MaxPool2D(pool_size=2, strides=2)

        self.conv6 = nn.Conv2D(
            filters=512,
            kernel_size=3,
            strides=1,
            padding='same',
            activation='relu',
            use_bias=False)
        self.bn6 = nn.BatchNormalization()
        self.mxp6 = nn.MaxPool2D(pool_size=2, strides=2)

        self.conv7 = nn.Conv2D(
            filters=1024,
            kernel_size=3,
            strides=1,
            padding='same',
            activation='relu',
            use_bias=False)
        self.bn7 = nn.BatchNormalization()

        self.avgpool = nn.GlobalAveragePooling2D()
        self.output_layer = nn.NormalDense(1000, use_bias=False)

    def call(self, inputs, training=None):
        with tf.name_scope('unbinarized'):
            x = self.conv1(inputs)
        x = self.bn1(x, training=training)
        x = self.mxp1(x)

        x = self.conv2(x)
        x = self.bn2(x, training=training)
        x = self.mxp2(x)

        x = self.conv3(x)
        x = self.bn3(x, training=training)
        x = self.mxp3(x)

        x = self.conv4(x)
        x = self.bn4(x, training=training)
        x = self.mxp4(x)

        x = self.conv5(x)
        x = self.bn5(x, training=training)
        x = self.mxp5(x)

        x = self.conv6(x)
        x = self.bn6(x, training=training)
        x = self.mxp6(x)

        x = self.conv7(x)
        x = self.bn7(x, training=training)

        x = self.avgpool(x)
        with tf.name_scope('unbinarized'):
            x = self.output_layer(x)
        return x
3,266
28.432432
64
py
Riptide
Riptide-master/riptide/models/alexnet.py
import os
import tensorflow as tf
from riptide.binary import binary_layers as nn


class alexnet(tf.keras.Model):
    def __init__(self, classes=1000):
        super(alexnet, self).__init__()
        self.conv1 = nn.NormalConv2D(
            filters=64,
            kernel_size=11,
            strides=4,
            padding='same',
            activation='relu',
            use_bias=True)
        self.bn1 = nn.NormalBatchNormalization(center=False, scale=False)
        self.pool1 = nn.NormalMaxPool2D(pool_size=2, strides=2)
        self.enter_int = nn.EnterInteger(scale=1.0)
        self.conv2 = nn.BinaryConv2D(
            filters=192,
            kernel_size=5,
            strides=1,
            padding='same',
            activation='relu',
            use_bias=False)
        self.bn2 = nn.BatchNormalization(self.conv2)
        self.pool2 = nn.MaxPool2D(pool_size=2, strides=2)
        self.conv3 = nn.BinaryConv2D(
            filters=384,
            kernel_size=3,
            strides=1,
            padding='same',
            activation='relu',
            use_bias=False)
        self.bn3 = nn.BatchNormalization(self.conv3)
        self.conv4 = nn.BinaryConv2D(
            filters=384,
            kernel_size=3,
            strides=1,
            padding='same',
            activation='relu',
            use_bias=False)
        self.bn4 = nn.BatchNormalization(self.conv4)
        self.conv5 = nn.BinaryConv2D(
            filters=256,
            kernel_size=3,
            strides=1,
            padding='same',
            activation='relu',
            use_bias=False)
        self.bn5 = nn.BatchNormalization(self.conv5)
        self.pool5 = nn.MaxPool2D(pool_size=2, strides=2)
        self.exit_int = nn.ExitInteger()
        self.flatten = nn.Flatten()
        self.dense6 = nn.NormalDense(4096, use_bias=True, activation='relu')
        self.bn6 = nn.NormalBatchNormalization()
        self.dense7 = nn.NormalDense(4096, use_bias=True, activation='relu')
        self.bn7 = nn.NormalBatchNormalization()
        self.dense8 = nn.NormalDense(classes, use_bias=True)
        #self.scalu = nn.Scalu()
        self.softmax = nn.Activation('softmax')

    def call(self, inputs, training=None):
        x = self.conv1(inputs)
        x = self.bn1(x)
        x = self.pool1(x)
        x = self.enter_int(x)
        x = self.conv2(x)
        x = self.bn2(x, training=training)
        x = self.pool2(x)
        x = self.conv3(x)
        x = self.bn3(x, training=training)
        x = self.conv4(x)
        x = self.bn4(x, training=training)
        x = self.conv5(x)
        x = self.bn5(x, training=training)
        x = self.pool5(x)
        x = self.exit_int(x)
        x = self.flatten(x)
        x = self.dense6(x)
        x = self.bn6(x, training=training)
        x = self.dense7(x)
        x = self.bn7(x, training=training)
        x = self.dense8(x)
        #x = self.scalu(x)
        x = self.softmax(x)
        return x
2,970
29.010101
76
py
Riptide
Riptide-master/riptide/utils/datasets_test.py
import tensorflow as tf
import numpy as np
from functools import partial
from datasets import imagerecord_dataset, imagefolder_dataset
from riptide.utils.preprocessing.inception_preprocessing import preprocess_image


class DatasetTest(tf.test.TestCase):
    def test_get_dataset(self):
        #preprocess = tf.keras.applications.inception_v3.preprocess_input
        preprocess = partial(
            preprocess_image, height=224, width=224, is_training=True)
        ds = imagerecord_dataset(
            '/data/imagenet/tfrecords', 2, preprocess=preprocess)
        image, label = next(iter(ds))
        self.assertEqual((2, 224, 224, 3), image.shape)
        self.assertTrue((np.abs(image) <= 1.0).all())

    def test_imagefolder(self):
        preprocess = partial(
            preprocess_image, height=224, width=224, is_training=True)
        ds = imagefolder_dataset(
            root='/data/imagenet', batch_size=2, preprocess=preprocess)
        image, label = next(iter(ds))
        self.assertEqual((2, 224, 224, 3), image.shape)
        self.assertTrue((np.abs(image) <= 1.0).all())


if __name__ == '__main__':
    tf.test.main()
1,146
36
80
py
Riptide
Riptide-master/riptide/utils/sequential.py
import tensorflow as tf


def _forward_core(x, layers):
    for l in layers:
        if isinstance(l, list):
            x = forward_layer_list(x, l)
        else:
            x = l(x)
    return x


def forward_layer_list(x, layers):
    # A leading string in the list names the tf.name_scope for the group.
    if isinstance(layers[0], str):
        name = layers.pop(0)
        with tf.name_scope(name):
            return _forward_core(x, layers)
    else:
        return _forward_core(x, layers)


def forward(x, layer):
    if isinstance(layer, list):
        return forward_layer_list(x, layer)
    else:
        return layer(x)


class Sequential(tf.keras.Model):
    def __init__(self, layers=None, name=None):
        super(Sequential, self).__init__()
        self.layer_list = []
        if layers is not None:
            # Extend the internal list rather than Keras' read-only `layers` property.
            self.layer_list.extend(layers)
        # Stored under its own attribute: `name_scope` is already a tf.Module property.
        self.scope_name = name

    def add(self, l):
        self.layer_list.append(l)

    def call(self, x):
        with tf.name_scope(self.scope_name):
            for l in self.layer_list:
                if isinstance(l, list):
                    # Nested lists are forwarded as named sub-groups.
                    x = forward_layer_list(x, l)
                else:
                    x = l(x)
        return x
1,132
22.604167
47
py
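A short, hedged sketch of how the helpers in the record above compose: a layer group is a plain Python list whose optional first element is a string used as the tf.name_scope for the group. The layers and shapes below are illustrative.

import tensorflow as tf

# Assumes forward_layer_list from the record above is in scope.
block = ['stem',                                   # scope name for this group
         tf.keras.layers.Conv2D(8, 3, padding='same'),
         tf.keras.layers.ReLU()]
x = tf.random.uniform([1, 32, 32, 3])
y = forward_layer_list(x, block)                   # runs Conv2D then ReLU under name_scope('stem')
print(y.shape)                                     # (1, 32, 32, 8)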
Riptide
Riptide-master/riptide/anneal/anneal_funcs.py
import tensorflow as tf from riptide.anneal.anneal_config import Config @tf.custom_gradient def AlphaClip(x, alpha): output = tf.clip_by_value(x, 0, alpha) def grad_fn(dy): x_grad_mask = tf.cast(tf.logical_and(x >= 0, x <= alpha), tf.float32) alpha_grad_mask = tf.cast(x >= alpha, tf.float32) alpha_grad = tf.reduce_sum(dy * alpha_grad_mask) x_grad = dy * x_grad_mask return [x_grad, alpha_grad] return output, grad_fn @tf.custom_gradient def AlphaQuantize(x, alpha, bits): output = tf.round(x * ((2**bits - 1) / alpha)) * (alpha / (2**bits - 1)) def grad_fn(dy): return [dy, None, None] return output, grad_fn class PACT(tf.keras.layers.Layer): def __init__(self, bits=None): super(PACT, self).__init__() self.scope = Config.current self.quantize = self.scope.quantize self.bits = bits if self.bits is None: self.bits = self.scope.a_bits self.bits = float(self.bits) self.fixed = self.scope.fixed def build(self, input_shape): if self.quantize: if self.fixed: self.alpha = 1.0 else: self.alpha = self.add_variable( 'alpha', shape=[], initializer=tf.initializers.Constant([10.]), regularizer=tf.keras.regularizers.l2(0.0002)) def call(self, inputs): if self.quantize: outputs = AlphaClip(inputs, self.alpha) if not self.fixed: tf.summary.histogram('alpha', self.alpha) with tf.name_scope('QA'): outputs = AlphaQuantize(outputs, self.alpha, self.bits) tf.summary.histogram('activation', inputs) tf.summary.histogram('quantized_activation', outputs) else: outputs = tf.nn.relu(inputs) return outputs def get_config(self): return {'quantize': self.quantize, 'bits': self.bits} def compute_output_shape(self, input_shape): return input_shape def get_sawb_coefficients(bits): bits = int(bits) coefficient_dict = {1: [0., 1.], 2: [3.19, -2.14], 3: [7.40, -6.66], 4: [11.86, -11.68], 5: [17.08, -17.66], 6: [22.49, -23.95], 7: [28.68, -31.24], 8: [32.27, -35.46], 16: [34.26, -37.60], 32: [40.60, -45.33]} return coefficient_dict[bits] @tf.custom_gradient def SAWBQuantize(x, alpha, bits): # Clip between -alpha and alpha clipped = tf.clip_by_value(x, -alpha, alpha) # Rescale to [0, alpha] scaled = (clipped + alpha) / 2. # Quantize. quantized = tf.round(scaled * ((2**bits - 1) / alpha)) * (alpha / (2**bits - 1)) # Rescale to negative range. output = (2 * quantized) - alpha def grad_fn(dy): return [dy, None, None] return output, grad_fn class SAWBConv2D(tf.keras.layers.Conv2D): def __init__(self, bits=None, parent=None, *args, **kwargs): super(SAWBConv2D, self).__init__(*args, **kwargs) self.scope = Config.current self.quantize = self.scope.quantize self.bits = bits if self.bits is None: self.bits = self.scope.w_bits self.bits = float(self.bits) if self.quantize: self.c1, self.c2 = get_sawb_coefficients(self.bits) # optional for converting, reference to the layer that # quantized the outputs self.parent = parent def call(self, inputs): if self.quantize: # Compute proper scale for our weights. 
alpha = self.c1 * tf.sqrt(tf.reduce_mean( self.kernel**2)) + self.c2 * tf.reduce_mean( tf.abs(self.kernel)) # Quantize kernel with tf.name_scope("QW"): kernel = SAWBQuantize(self.kernel, alpha, self.bits) tf.summary.histogram("weight", self.kernel) tf.summary.histogram("quantized_weight", kernel) else: kernel = self.kernel # Invoke convolution outputs = self._convolution_op(inputs, kernel) if self.use_bias: if self.data_format == 'channels_first': outputs = tf.nn.bias_add( outputs, self.bias, data_format='NCHW') else: outputs = tf.nn.bias_add( outputs, self.bias, data_format='NHWC') if self.activation is not None: outputs = self.activation(outputs) return outputs class SAWBDense(tf.keras.layers.Dense): def __init__(self, bits=None, *args, **kwargs): super(SAWBDense, self).__init__(*args, **kwargs) self.scope = Config.current self.quantize = self.scope.quantize self.bits = bits if self.bits is None: self.bits = self.scope.w_bits if self.quantize: self.c1, self.c2 = get_sawb_coefficients(self.bits) def call(self, inputs): if self.quantize: alpha = self.c1 * tf.sqrt(tf.reduce_mean( self.kernel**2)) + self.c2 * tf.reduce_mean( tf.abs(self.kernel)) with tf.name_scope("QW"): kernel = SAWBQuantize(self.kernel, alpha, self.bits) tf.summary.histogram("weight", self.kernel) tf.summary.histogram("quantized_weight", kernel) else: kernel = self.kernel rank = len(inputs.shape) if rank > 2: # Broadcasting is required for the inputs. outputs = tf.tensordot(inputs, kernel, [[rank - 1], [0]]) # Reshape the output back to the original ndim of the input. if not tf.executing_eagerly(): shape = inputs.get_shape().as_list() output_shape = shape[:-1] + [self.units] outputs.set_shape(output_shape) else: outputs = tf.matmul(inputs, kernel) if self.use_bias: outputs = tf.nn.bias_add(outputs, self.bias) if self.activation is not None: outputs = self.activation(outputs) return outputs
6,278
32.57754
92
py
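A hedged usage sketch for the quantization layers in the record above: PACT and SAWBConv2D read their bit-widths from the active Config scope (riptide.anneal.anneal_config). The Config arguments mirror the notebook later in this dump and are assumptions about that API.

import tensorflow as tf
from riptide.anneal.anneal_config import Config
from riptide.anneal.anneal_funcs import PACT, SAWBConv2D

# Assumed Config API: a context manager exposing quantize/a_bits/w_bits/fixed.
config = Config(quantize=True, a_bits=2, w_bits=2, fixed=False)
with config:
    act = PACT()                                    # learnable clipping threshold alpha
    conv = SAWBConv2D(filters=16, kernel_size=3, padding='same')

x = tf.random.uniform([1, 8, 8, 3])
y = conv(act(x))                                    # clipped+quantized activations, SAWB-quantized weights
print(y.shape)                                      # (1, 8, 8, 16)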
Riptide
Riptide-master/riptide/anneal/models/resnet.py
import six import tensorflow as tf import tensorflow.keras as keras from tensorflow.keras.layers import Input, Activation, Reshape, Dense, Conv2D, MaxPooling2D, GlobalMaxPooling2D, GlobalAveragePooling2D, Dropout, BatchNormalization, Add from tensorflow.keras.regularizers import l2 from riptide.anneal.anneal_funcs import * def _handle_dim_ordering(): global ROW_AXIS global COL_AXIS global CHANNEL_AXIS if keras.backend.image_data_format() == 'channels_last': ROW_AXIS = 1 COL_AXIS = 2 CHANNEL_AXIS = 3 else: CHANNEL_AXIS = 1 ROW_AXIS = 2 COL_AXIS = 3 def _bn_relu(x, bn_name=None): norm = BatchNormalization(axis=CHANNEL_AXIS, name=bn_name)(x) return PACT()(norm) def _conv_bn_relu(**conv_params): filters = conv_params["filters"] kernel_size = conv_params["kernel_size"] strides = conv_params.setdefault("strides", (1, 1)) dilation_rate = conv_params.setdefault("dilation_rate", (1, 1)) conv_name = conv_params.setdefault("conv_name", None) bn_name = conv_params.setdefault("bn_name", None) kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal") padding = conv_params.setdefault("padding", "same") kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1e-4)) def f(x): x = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, dilation_rate=dilation_rate, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, name=conv_name)(x) return _bn_relu(x, bn_name=bn_name) return f def _bn_relu_conv(**conv_params): filters = conv_params["filters"] kernel_size = conv_params["kernel_size"] strides = conv_params.setdefault("strides", (1, 1)) dilation_rate = conv_params.setdefault("dilation_rate", (1, 1)) conv_name = conv_params.setdefault("conv_name", None) bn_name = conv_params.setdefault("bn_name", None) kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal") padding = conv_params.setdefault("padding", "same") kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1e-4)) def f(x): activation = _bn_relu(x, bn_name=bn_name) return Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, dilation_rate=dilation_rate, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, name=conv_name)(activation) return f def _shortcut(input_feature, residual, conv_name_base=None, bn_name_base=None): # Adds a shortcut between input and residual block and merges them with "sum". input_shape = input_feature.shape residual_shape = residual.shape stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS])) stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS])) equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS] shortcut = input_feature # 1x1 conv if shape is different, otherwise identity. 
if stride_width > 1 or stride_height > 1 or not equal_channels: if conv_name_base is not None: conv_name_base = conv_name_base + '1' shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS], kernel_size=(1, 1), strides=(stride_width, stride_height), padding="valid", kernel_initializer="he_normal", kernel_regularizer=l2(0.0001), name=conv_name_base)(input_feature) if bn_name_base is not None: bn_name_base = bn_name_base + '1' shortcut = BatchNormalization(axis=CHANNEL_AXIS, name=bn_name_base)(shortcut) return Add()([shortcut, residual]) def _residual_block(block_function, filters, blocks, stage, transition_strides=None, transition_dilation_rates=None, dilation_rates=None, is_first_layer=False, dropout=None, residual_unit=_bn_relu_conv): """Builds a residual block with repeating bottleneck blocks. stage: integer, current stage label, used for generating label names. blocks: number of blocks 'a', 'b', ... current block label used for generating names. transition_strides: a list of tuples for strides of each transition. transition_dilation_rates: a list of tuples for the dilation rate of each transition. """ if transition_dilation_rates is None: transition_dilation_rates = [(1, 1)] * blocks if transition_strides is None: transition_strides = [(1, 1)] * blocks if dilation_rates is None: dilation_rates = [1] * blocks def f(x): for i in range(blocks): is_first_block = is_first_layer and i == 0 x = block_function(filters=filters, stage=stage, block=i, transition_strides=transition_strides[i], dilation_rate=dilation_rates[i], is_first_block_of_first_layer=is_first_block, dropout=dropout, residual_unit=residual_unit)(x) return x return f def _block_name_base(stage, block): # Get the convolution and batchnorm name for this stage and block. if block < 27: block = '%c' % (block + 97) conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage) + block + '_branch' return conv_name_base, bn_name_base def basic_block(filters, stage, block, transition_strides=(1, 1), dilation_rate=(1, 1), is_first_block_of_first_layer=False, dropout=None, residual_unit=_bn_relu_conv): # Basic 3 x 3 convolution blocks for resnets with fewer than 34 layers. def f(input_features): conv_name_base, bn_name_base = _block_name_base(stage, block) if is_first_block_of_first_layer: # Dont repeat bn-> relu since we just did bn->relu->maxpool (also dont binarize) x = Conv2D(filters=filters, kernel_size=(3, 3), strides=transition_strides, dilation_rate=dilation_rate, padding="same", kernel_initializer="he_normal", kernel_regularizer=l2(1e-4), name=conv_name_base + '2a')(input_features) else: x = residual_unit(filters=filters, kernel_size=(3, 3), strides=transition_strides, dilation_rate=dilation_rate, conv_name_base=conv_name_base + '2a', bn_name_base=bn_name_base + '2a')(input_features) if dropout is not None: x = Dropout(dropout)(x) x = residual_unit(filters=filters, kernel_size=(3, 3), conv_name_base=conv_name_base + '2b', bn_name_base=bn_name_base + '2b')(x) return _shortcut(input_features, x) return f def bottleneck(filters, stage, block, transition_strides=(1, 1), dilation_rate=(1, 1), is_first_block_of_first_layer=False, dropout=None, residual_unit=_bn_relu_conv): # Bottleneck architecture for > 34 layer resnet. def f(input_feature): conv_name_base, bn_name_base = _block_name_base(stage, block) if is_first_block_of_first_layer: # Dont repeat bn->relu since we just did bn->relu->maxpool also dont quantize. 
x = Conv2D(filters=filters, kernel_size=(1, 1), strides=transition_strides, dilation_rate=dilation_rate, padding="same", kernel_initializer="he_normal", kernel_regularizer=l2(1e-4), name=conv_name_base + '2a')(input_feature) else: x = residual_unit(filters=filters, kernel_size=(1, 1), strides=transition_strides, dilation_rate=dilation_rate, conv_name_base=conv_name_base + '2a', bn_name_base=bn_name_base + '2a')(input_feature) if dropout is not None: x = Dropout(dropout(x)) x = residual_unit(filters=filters * 4, kernel_size=(1, 1), conv_name_base=conv_name_base + '2c', bn_name_base=bn_name_base + '2c')(x) return _shortcut(input_feature, x) return f def _string_to_function(identifier): if isinstance(identifier, six.string_types): res = globals().get(identifier) if not res: raise ValueError('Invalid {}'.format(identifier)) return res return identifier def ResNet(input_shape=None, classes=1000, block='bottleneck', residual_unit='v1', reptitions=None, initial_filters=64, activation='softmax', include_top=True, input_tensor=None, dropout=None, transition_dilation_rate=(1, 1), initial_strides=(2, 2), initial_kernel_size=(7, 7), initial_pooling='max', final_pooling=None, top='classification'): # Builds a resnet architecture if reptitions is None: reptitions = [3, 4, 6, 3] _handle_dim_ordering() if input_shape is None: input_shape = [224, 224, 3] if block == 'basic': block_fn = basic_block elif block == 'bottleneck': block_fn = bottleneck elif isinstance(block, six.string_types): block_fn = _string_to_function(block) else: block_fn = block if residual_unit == 'v2': residual_unit = _bn_relu_conv elif residual_unit == 'v1': residual_unit = _conv_bn_relu elif isinstance(residual_unit, six.string_types): residual_unit = _string_to_function(residual_unit) else: residual_unit = residual_unit img_input = Input(shape=input_shape, tensor=input_tensor) x = _conv_bn_relu(filters=initial_filters, kernel_size=initial_kernel_size, strides=initial_strides)(img_input) if initial_pooling == 'max': x = MaxPooling2D(pool_size=(3, 3), strides=initial_strides, padding="same")(x) block = x filters = initial_filters for i, r in enumerate(reptitions): transition_dilation_rates = [transition_dilation_rate * r] transition_strides = [(1, 1)] * r if transition_dilation_rate == (1, 1): transition_strides[0] = (2, 2) block = _residual_block( block_fn, filters=filters, stage=i, blocks=r, is_first_layer=(i == 0), dropout=dropout, transition_dilation_rates=transition_dilation_rates, transition_strides=transition_strides, residual_unit=residual_unit)(block) filters *= 2 # Last activation x = _bn_relu(block) # Classifier block if include_top and top is 'classification': x = GlobalAveragePooling2D()(x) x = Dense(units=classes, activation=activation, kernel_initializer="he_normal")(x) model = keras.Model(inputs=img_input, outputs=x) return model def ResNet18(input_shape=[224, 224, 3], classes=1000): return ResNet(input_shape, classes, basic_block, reptitions=[2, 2, 2, 2])
12,689
36.214076
169
py
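A brief, hedged sketch of calling the functional builder in the record above; because its PACT activations read the active Config scope, the model is constructed inside one, as the notebook later in this dump does. The bit-widths are illustrative.

from riptide.anneal.anneal_config import Config

# Assumes ResNet18 from the record above is importable.
with Config(quantize=True, a_bits=4, w_bits=4, fixed=False):
    model = ResNet18(input_shape=[224, 224, 3], classes=1000)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()   # stages of basic blocks assembled by _residual_block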
Riptide
Riptide-master/riptide/anneal/models/resnet18.py
import tensorflow as tf import tensorflow.keras.layers as nn from riptide.anneal.anneal_funcs import * from tensorflow.keras.regularizers import l2 from tensorflow.keras.models import Sequential from riptide.binary.binary_layers import Scalu class resnet18(tf.keras.Model): def __init__(self, classes=1000): super(resnet18, self).__init__() # Input Layer self.conv1 = nn.Conv2D( filters=64, kernel_size=7, strides=2, padding='same', activation=None, use_bias=False) self.pool1 = nn.MaxPooling2D(pool_size=3, strides=2, padding='same') self.b1 = nn.BatchNormalization() self.p1 = PACT() # BasicBlock 1 self.block1_conv1 = SAWBConv2D( filters=64, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False, parent = self.p1) self.block1_bn1 = nn.BatchNormalization() self.block1_p1 = PACT() self.block1_conv2 = SAWBConv2D( filters=64, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False, parent=self.block1_p1 ) self.block1_down_conv = SAWBConv2D( # Not in dorefa? But part of resnet? filters=64, kernel_size=1, strides=1, padding='valid', activation=None, use_bias=False, parent=self.p1 ) self.block1_res = tf.keras.layers.Add() # BasicBlock 2 self.block2_bn1 = nn.BatchNormalization() self.block2_p1 = PACT() self.block2_conv1 = SAWBConv2D( filters=64, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False, parent=self.block2_p1 ) self.block2_bn2 = nn.BatchNormalization() self.block2_p2 = PACT() self.block2_conv2 = SAWBConv2D( filters=64, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False, parent=self.block2_p2) self.block2_res = tf.keras.layers.Add() # BasicBlock 3 self.block3_bn1 = nn.BatchNormalization() self.block3_p1 = PACT() self.block3_conv1 = SAWBConv2D( filters=128, kernel_size=3, strides=2, padding='same', activation=None, use_bias=False, parent=self.block3_p1) self.block3_bn2 = nn.BatchNormalization() self.block3_p2 = PACT() self.block3_conv2 = SAWBConv2D( filters=128, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False, parent=self.block3_p2) self.block3_down_conv = SAWBConv2D( filters=128, kernel_size=1, strides=2, padding='valid', activation=None, use_bias=False, parent=self.block3_p1) self.block3_res = tf.keras.layers.Add() # BasicBlock 4 self.block4_bn1 = nn.BatchNormalization() self.block4_p1 = PACT() self.block4_conv1 = SAWBConv2D( filters=128, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False, parent=self.block4_p1) self.block4_bn2 = nn.BatchNormalization() self.block4_p2 = PACT() self.block4_conv2 = SAWBConv2D( filters=128, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False, parent=self.block4_p2) self.block4_res = tf.keras.layers.Add() # BasicBlock 5 self.block5_bn1 = nn.BatchNormalization() self.block5_p1 = PACT() self.block5_conv1 = SAWBConv2D( filters=256, kernel_size=3, strides=2, padding='same', activation=None, use_bias=False, parent=self.block5_p1) self.block5_bn2 = nn.BatchNormalization() self.block5_p2 = PACT() self.block5_conv2 = SAWBConv2D( filters=256, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False, parent=self.block5_p2) self.block5_down_conv = SAWBConv2D( filters=256, kernel_size=1, strides=2, padding='valid', activation=None, use_bias=False, parent=self.block5_p1) self.block5_res = tf.keras.layers.Add() # BasicBlock 6 self.block6_bn1 = nn.BatchNormalization() self.block6_p1 = PACT() self.block6_conv1 = SAWBConv2D( filters=256, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False, 
parent=self.block6_p1) self.block6_bn2 = nn.BatchNormalization() self.block6_p2 = PACT() self.block6_conv2 = SAWBConv2D( filters=256, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False, parent=self.block6_p2) self.block6_res = tf.keras.layers.Add() # BasicBlock 7 self.block7_bn1 = nn.BatchNormalization() self.block7_p1 = PACT() self.block7_conv1 = SAWBConv2D( filters=512, kernel_size=3, strides=2, padding='same', activation=None, use_bias=False, parent=self.block7_p1) self.block7_bn2 = nn.BatchNormalization() self.block7_p2 = PACT() self.block7_conv2 = SAWBConv2D( filters=512, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False, parent=self.block7_p2) self.block7_down_conv = SAWBConv2D( filters=512, kernel_size=1, strides=2, padding='valid', activation=None, use_bias=False, parent=self.block7_p1) self.block7_res = tf.keras.layers.Add() # BasicBlock 8 self.block8_bn1 = nn.BatchNormalization() self.block8_p1 = PACT() self.block8_conv1 = SAWBConv2D( filters=512, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False, parent=self.block8_p1) self.block8_bn2 = nn.BatchNormalization() self.block8_p2 = PACT() self.block8_conv2 = SAWBConv2D( filters=512, kernel_size=3, strides=1, padding='same', activation=None, use_bias=False, parent=self.block8_p2) self.block8_res = tf.keras.layers.Add() self.bn2 = nn.BatchNormalization() self.avg_pool = nn.GlobalAveragePooling2D() self.flatten = nn.Flatten() # self.q = nn.EnterInteger(1.5) # Dorefa fp here # self.p2 = PACT() self.dense = nn.Dense(classes, use_bias=False) self.scalu = Scalu() self.softmax = nn.Activation('softmax') def call(self, inputs, training=None): # Input layer # this name scope just affects names of parameters...the a2w1 has it... # the a2w2 doesn't oops #with tf.name_scope('unbinarized'): x = self.conv1(inputs) x = self.pool1(x) x = self.b1(x, training=training) # Block 1 x = self.p1(x) residual = x x = self.block1_conv1(x) #return x x = self.block1_bn1(x, training=training) x = self.block1_p1(x) x = self.block1_conv2(x) residual = self.block1_down_conv(residual) x = self.block1_res([x, residual]) # Block 2 residual = x x = self.block2_bn1(x) x = self.block2_p1(x) x = self.block2_conv1(x) x = self.block2_bn2(x, training=training) x = self.block2_p2(x) x = self.block2_conv2(x) x = self.block2_res([x, residual]) # Block 3 x = self.block3_bn1(x, training=training) x = self.block3_p1(x) residual = x x = self.block3_conv1(x) x = self.block3_bn2(x,training=training) x = self.block3_p2(x) x = self.block3_conv2(x) residual = self.block3_down_conv(residual) x = self.block3_res([x, residual]) # Block 4 residual = x x = self.block4_bn1(x, training=training) x = self.block4_p1(x) x = self.block4_conv1(x) x = self.block4_bn2(x, training=training) x = self.block4_p2(x) x = self.block4_conv2(x) x = self.block4_res([x, residual]) # Block 5 x = self.block5_bn1(x, training=training) x = self.block5_p1(x) residual = x x = self.block5_conv1(x) #return x x = self.block5_bn2(x, training=training) x = self.block5_p2(x) x = self.block5_conv2(x) residual = self.block5_down_conv(residual) x = self.block5_res([x, residual]) # Block 6 residual = x x = self.block6_bn1(x, training=training) x = self.block6_p1(x) x = self.block6_conv1(x) x = self.block6_bn2(x, training=training) x = self.block6_p2(x) x = self.block6_conv2(x) #return x x = self.block6_res([x, residual]) # Block 7 x = self.block7_bn1(x, training=training) x = self.block7_p1(x) residual = x x = self.block7_conv1(x) #return x x = 
self.block7_bn2(x, training=training) x = self.block7_p2(x) x = self.block7_conv2(x) #return x residual = self.block7_down_conv(residual) x = self.block7_res([x, residual]) # Block 8 residual = x x = self.block8_bn1(x, training=training) x = self.block8_p1(x) x = self.block8_conv1(x) x = self.block8_bn2(x, training=training) x = self.block8_p2(x) x = self.block8_conv2(x) x = self.block8_res([x, residual]) # Output layers x = self.bn2(x, training=training) x = self.avg_pool(x) x = self.flatten(x) # x = self.q(x) # x = self.p2(x) x = self.dense(x) x = self.scalu(x) # only for 1-bit x = self.softmax(x) return x
10,989
28.543011
81
py
Riptide
Riptide-master/riptide/anneal/models/squeezenet.py
import tensorflow as tf import tensorflow.keras.layers as nn from riptide.anneal.anneal_funcs import * from tensorflow.keras.regularizers import l2 class SqueezeNet(tf.keras.models.Model): def __init__(self, classes=1000): super(SqueezeNet, self).__init__() self.classes = classes l2_reg = 5e-6 self.resize = nn.Lambda(lambda image: tf.image.resize(image, [224, 224])) self.c0 = nn.Conv2D(kernel_size=7, strides=2, filters=96, padding='same', activation='relu', kernel_regularizer=l2(l2_reg)) self.mp0 = nn.MaxPooling2D(pool_size=2) self.b0 = nn.BatchNormalization() self.p0 = PACT() # Fire 1 self.f1c1 = SAWBConv2D(filters=32, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg)) self.f1b1 = nn.BatchNormalization() self.f1p1 = PACT() self.f1c2 = SAWBConv2D(filters=64, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg)) self.f1b2 = nn.BatchNormalization() self.f1p2 = PACT() self.f1c3 = SAWBConv2D(filters=64, kernel_size=3, padding='same', kernel_regularizer=l2(l2_reg)) self.f1b3 = nn.BatchNormalization() self.f1p3 = PACT() self.f1concat = nn.Concatenate(axis=-1) # Fire 2 self.f2c1 = SAWBConv2D(filters=32, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg)) self.f2b1 = nn.BatchNormalization() self.f2p1 = PACT() self.f2c2 = SAWBConv2D(filters=64, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg)) self.f2b2 = nn.BatchNormalization() self.f2p2 = PACT() self.f2c3 = SAWBConv2D(filters=64, kernel_size=3, padding='same', kernel_regularizer=l2(l2_reg)) self.f2b3 = nn.BatchNormalization() self.f2p3 = PACT() self.f2concat = nn.Concatenate(axis=-1) # Fire 3 self.f3c1 = SAWBConv2D(filters=32, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg)) self.f3b1 = nn.BatchNormalization() self.f3p1 = PACT() self.f3c2 = SAWBConv2D(filters=128, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg)) self.f3b2 = nn.BatchNormalization() self.f3p2 = PACT() self.f3c3 = SAWBConv2D(filters=128, kernel_size=3, padding='same', kernel_regularizer=l2(l2_reg)) self.f3b3 = nn.BatchNormalization() self.f3p3 = PACT() self.f3concat = nn.Concatenate(axis=-1) self.mp3 = nn.MaxPooling2D(pool_size=2) # Fire 4 self.f4c1 = SAWBConv2D(filters=32, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg)) self.f4b1 = nn.BatchNormalization() self.f4p1 = PACT() self.f4c2 = SAWBConv2D(filters=128, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg)) self.f4b2 = nn.BatchNormalization() self.f4p2 = PACT() self.f4c3 = SAWBConv2D(filters=128, kernel_size=3, padding='same', kernel_regularizer=l2(l2_reg)) self.f4b3 = nn.BatchNormalization() self.f4p3 = PACT() self.f4concat = nn.Concatenate(axis=-1) # Fire 5 self.f5c1 = SAWBConv2D(filters=64, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg)) self.f5b1 = nn.BatchNormalization() self.f5p1 = PACT() self.f5c2 = SAWBConv2D(filters=192, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg)) self.f5b2 = nn.BatchNormalization() self.f5p2 = PACT() self.f5c3 = SAWBConv2D(filters=192, kernel_size=3, padding='same', kernel_regularizer=l2(l2_reg)) self.f5b3 = nn.BatchNormalization() self.f5p3 = PACT() self.f5concat = nn.Concatenate(axis=-1) # Fire 6 self.f6c1 = SAWBConv2D(filters=64, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg)) self.f6b1 = nn.BatchNormalization() self.f6p1 = PACT() self.f6c2 = SAWBConv2D(filters=192, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg)) self.f6b2 = nn.BatchNormalization() self.f6p2 = PACT() self.f6c3 = SAWBConv2D(filters=192, kernel_size=3, padding='same', 
kernel_regularizer=l2(l2_reg)) self.f6b3 = nn.BatchNormalization() self.f6p3 = PACT() self.f6concat = nn.Concatenate(axis=-1) # Fire 7 self.f7c1 = SAWBConv2D(filters=64, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg)) self.f7b1 = nn.BatchNormalization() self.f7p1 = PACT() self.f7c2 = SAWBConv2D(filters=256, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg)) self.f7b2 = nn.BatchNormalization() self.f7p2 = PACT() self.f7c3 = SAWBConv2D(filters=256, kernel_size=3, padding='same', kernel_regularizer=l2(l2_reg)) self.f7b3 = nn.BatchNormalization() self.f7p3 = PACT() self.f7concat = nn.Concatenate(axis=-1) self.mp7 = nn.MaxPooling2D(pool_size=2) # Fire 8 self.f8c1 = SAWBConv2D(filters=64, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg)) self.f8b1 = nn.BatchNormalization() self.f8p1 = PACT() self.f8c2 = SAWBConv2D(filters=256, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg)) self.f8b2 = nn.BatchNormalization() self.f8p2 = PACT() self.f8c3 = SAWBConv2D(filters=256, kernel_size=3, padding='same', kernel_regularizer=l2(l2_reg)) self.f8b3 = nn.BatchNormalization() self.f8p3 = PACT() self.f8concat = nn.Concatenate(axis=-1) # Output self.avgpool = nn.GlobalAveragePooling2D() self.classifier = nn.Dense(self.classes, activation='softmax') def call(self, x, training=None): y = self.resize(x) y = self.c0(y) y = self.mp0(y) y = self.b0(y, training=training) y = self.p0(y) # Fire 1 y = self.f1c1(y) y = self.f1b1(y, training=training) y = self.f1p1(y) y1x = self.f1c2(y) y1x = self.f1b2(y1x, training=training) y1x = self.f1p2(y1x) y3x = self.f1c3(y) y3x = self.f1b3(y3x, training=training) y3x = self.f1p3(y3x) y = self.f1concat([y1x, y3x]) # Fire 2 y = self.f2c1(y) y = self.f2b1(y, training=training) y = self.f2p1(y) y1x = self.f2c2(y) y1x = self.f2b2(y1x, training=training) y1x = self.f2p2(y1x) y3x = self.f2c3(y) y3x = self.f2b3(y3x, training=training) y3x = self.f2p3(y3x) y = self.f2concat([y1x, y3x]) # Fire 3 y = self.f3c1(y) y = self.f3b1(y, training=training) y = self.f3p1(y) y1x = self.f3c2(y) y1x = self.f3b2(y1x, training=training) y1x = self.f3p2(y1x) y3x = self.f3c3(y) y3x = self.f3b3(y3x, training=training) y3x = self.f3p3(y3x) y = self.f3concat([y1x, y3x]) y = self.mp3(y) # Fire 4 y = self.f4c1(y) y = self.f4b1(y, training=training) y = self.f4p1(y) y1x = self.f4c2(y) y1x = self.f4b2(y1x, training=training) y1x = self.f4p2(y1x) y3x = self.f4c3(y) y3x = self.f4b3(y3x, training=training) y3x = self.f4p3(y3x) y = self.f4concat([y1x, y3x]) # Fire 5 y = self.f5c1(y) y = self.f5b1(y, training=training) y = self.f5p1(y) y1x = self.f5c2(y) y1x = self.f5b2(y1x, training=training) y1x = self.f5p2(y1x) y3x = self.f5c3(y) y3x = self.f5b3(y3x, training=training) y3x = self.f5p3(y3x) y = self.f5concat([y1x, y3x]) # Fire 6 y = self.f6c1(y) y = self.f6b1(y, training=training) y = self.f6p1(y) y1x = self.f6c2(y) y1x = self.f6b2(y1x, training=training) y1x = self.f6p2(y1x) y3x = self.f6c3(y) y3x = self.f6b3(y3x, training=training) y3x = self.f6p3(y3x) y = self.f6concat([y1x, y3x]) # Fire 7 y = self.f7c1(y) y = self.f7b1(y, training=training) y = self.f7p1(y) y1x = self.f7c2(y) y1x = self.f7b2(y1x, training=training) y1x = self.f7p2(y1x) y3x = self.f7c3(y) y3x = self.f7b3(y3x, training=training) y3x = self.f7p3(y3x) y = self.f7concat([y1x, y3x]) y = self.mp7(y) # Fire 8 y = self.f8c1(y) y = self.f8b1(y, training=training) y = self.f8p1(y) y1x = self.f8c2(y) y1x = self.f8b2(y1x, training=training) y1x = self.f8p2(y1x) y3x = self.f8c3(y) y3x = self.f8b3(y3x, 
training=training) y3x = self.f8p3(y3x) y = self.f8concat([y1x, y3x]) y = self.avgpool(y) y = self.classifier(y) tf.compat.v1.summary.histogram('output', y) return y
8,902
33.507752
131
py
Riptide
Riptide-master/riptide/anneal/models/alexnet.py
import tensorflow as tf
import tensorflow.keras.layers as nn
from riptide.anneal.anneal_funcs import *
from tensorflow.keras.regularizers import l2
from tensorflow.keras.models import Sequential


class alexnet(tf.keras.models.Model):
    def __init__(self, *args, **kwargs):
        super(alexnet, self).__init__(*args, **kwargs)
        l2_reg = 5e-6
        # Layer 1
        self.c1 = nn.Conv2D(
            64, (11, 11),
            strides=(4, 4),
            padding='same',
            kernel_regularizer=l2(l2_reg))
        self.m1 = nn.MaxPooling2D(pool_size=(2, 2))
        self.b1 = nn.BatchNormalization()
        self.p1 = PACT()
        # Layer 2
        self.c2 = SAWBConv2D(
            192, (5, 5), padding='same', kernel_regularizer=l2(l2_reg))
        self.m2 = nn.MaxPooling2D(pool_size=(2, 2))
        self.b2 = nn.BatchNormalization()
        self.p2 = PACT()
        # Layer 3
        self.c3 = SAWBConv2D(
            384, (3, 3), padding='same', kernel_regularizer=l2(l2_reg))
        self.b3 = nn.BatchNormalization()
        self.p3 = PACT()
        # Layer 4
        self.c4 = SAWBConv2D(
            384, (3, 3), padding='same', kernel_regularizer=l2(l2_reg))
        self.b4 = nn.BatchNormalization()
        self.p4 = PACT()
        # Layer 5
        self.c5 = SAWBConv2D(
            256, (3, 3), padding='same', kernel_regularizer=l2(l2_reg))
        self.m5 = nn.MaxPooling2D()
        self.b5 = nn.BatchNormalization()
        self.p5 = PACT()
        # FC
        self.f6 = nn.Flatten()
        self.d6 = SAWBDense(4096, kernel_regularizer=l2(l2_reg))
        self.b6 = nn.BatchNormalization()
        self.p6 = PACT()
        # FC2
        self.d7 = SAWBDense(4096, kernel_regularizer=l2(l2_reg))
        self.b7 = nn.BatchNormalization()
        self.p7 = nn.Activation('relu')
        # Output
        self.d8 = nn.Dense(1000, kernel_regularizer=l2(l2_reg))
        self.softmax = nn.Activation('softmax')

    def call(self, inputs, training=True):
        x = self.c1(inputs)
        x = self.m1(x)
        x = self.b1(x, training=training)
        x = self.p1(x)

        x = self.c2(x)
        x = self.m2(x)
        x = self.b2(x, training=training)
        x = self.p2(x)

        x = self.c3(x)
        x = self.b3(x, training=training)
        x = self.p3(x)

        x = self.c4(x)
        x = self.b4(x, training=training)
        x = self.p4(x)

        x = self.c5(x)
        x = self.m5(x)
        x = self.b5(x, training=training)
        x = self.p5(x)

        x = self.f6(x)
        x = self.d6(x)
        x = self.b6(x, training=training)
        x = self.p6(x)

        x = self.d7(x)
        x = self.b7(x, training=training)
        x = self.p7(x)

        x = self.d8(x)
        x = self.softmax(x)
        return x
2,787
26.60396
71
py
Riptide
Riptide-master/notebooks/squeezenet_test.py
# To add a new cell, type '#%%'
# To add a new markdown cell, type '#%% [markdown]'
#%% Change working directory from the workspace root to the ipynb file location. Turn this addition off with the DataScience.changeDirOnImportExport setting
# ms-python.python added
import os
try:
    os.chdir(os.path.join(os.getcwd(), 'notebooks'))
    print(os.getcwd())
except:
    pass

#%%
import os
import cv2
import tensorflow as tf
import numpy as np
from functools import partial
from riptide.anneal.models.squeezenet import SqueezeNet
from riptide.anneal.models.get_models import get_model
from riptide.anneal.anneal_config import Config

#%%
os.environ['CUDA_VISIBLE_DEVICES'] = '1'

#%%
cifar = tf.keras.datasets.cifar10.load_data()
(train_images, train_labels), (test_images, test_labels) = cifar
train_labels = tf.keras.utils.to_categorical(train_labels, 10)
test_labels = tf.keras.utils.to_categorical(test_labels, 10)

datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    featurewise_center=True,
    featurewise_std_normalization=True,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True)
datagen.fit(train_images)

#%%
config = Config(quantize=False, a_bits=1, w_bits=1, fixed=False)
with config:
    model = SqueezeNet(classes=10)

model.compile(
    optimizer='adam',
    loss='categorical_crossentropy',
    metrics=['accuracy'])
test_input = tf.keras.layers.Input(shape=[32, 32, 3])
test_output = model(test_input)
model.summary()

#%%
model.fit_generator(
    datagen.flow(train_images, train_labels, batch_size=32),
    steps_per_epoch=len(train_images) / 32,
    epochs=10,
    validation_data=(test_images, test_labels))

#%%
1,682
23.391304
169
py
TextSiM
TextSiM-main/Text_Simplification_Systems/access-main/access/preprocess.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

from functools import wraps
import multiprocessing
import random
import re

from joblib import Parallel, delayed
import torch

from access.text import to_words
from access.utils.helpers import (open_files, yield_lines, yield_lines_in_parallel, get_temp_filepath,
                                  delete_files, get_temp_filepaths)


def apply_line_method_to_file(line_method, input_filepath):
    output_filepath = get_temp_filepath()
    with open(input_filepath, 'r') as input_file, open(output_filepath, 'w') as output_file:
        for line in input_file:
            transformed_line = line_method(line.rstrip('\n'))
            if transformed_line is not None:
                output_file.write(transformed_line + '\n')
    return output_filepath


def replace_lrb_rrb(text):
    text = re.sub(r'-lrb-', '(', text, flags=re.IGNORECASE)
    text = re.sub(r'-rrb-', ')', text, flags=re.IGNORECASE)
    text = re.sub(r'-lsb-', '[', text, flags=re.IGNORECASE)
    text = re.sub(r'-rsb-', ']', text, flags=re.IGNORECASE)
    text = re.sub(r'-lcb-', '{', text, flags=re.IGNORECASE)
    text = re.sub(r'-rcb-', '}', text, flags=re.IGNORECASE)
    return text


def replace_lrb_rrb_file(filepath):
    return apply_line_method_to_file(replace_lrb_rrb, filepath)


def to_lrb_rrb(text):
    # TODO: Very basic
    text = re.sub(r'((^| ))\( ', r'\1-lrb- ', text)
    text = re.sub(r' \)((^| ))', r' -rrb-\1', text)
    return text


def replace_back_quotes(text):
    return text.replace('`', "'")


def replace_double_quotes(text):
    return text.replace("''", '"')


def normalize_quotes(text):
    return replace_double_quotes(replace_back_quotes(text))


def to_lrb_rrb_file(input_filepath):
    return apply_line_method_to_file(to_lrb_rrb, input_filepath)


def lowercase_file(filepath):
    return apply_line_method_to_file(lambda line: line.lower(), filepath)


def concatenate_files(input_filepaths, output_filepath):
    with open(output_filepath, 'w') as output_f:
        for input_file in input_filepaths:
            with open(input_file, 'r') as input_f:
                for line in input_f:
                    output_f.write(line)


def split_file(input_filepath, output_filepaths, round_robin=False):
    if not round_robin:
        raise NotImplementedError('Splitting files is only implemented as round robin.')
    with open_files(output_filepaths, 'w') as files:
        # We write each line to a different file in a round robin fashion
        for i, line in enumerate(yield_lines(input_filepath)):
            files[i % len(output_filepaths)].write(line + '\n')


def merge_files(input_filepaths, output_filepath, round_robin=False):
    if not round_robin:
        return concatenate_files(input_filepaths, output_filepath)
    with open(output_filepath, 'w') as f:
        for lines in yield_lines_in_parallel(input_filepaths, strict=False):
            for line in lines:
                if line is None:
                    return
                f.write(line + '\n')


def get_real_n_jobs(n_jobs):
    n_cpus = multiprocessing.cpu_count()
    if n_jobs < 0:
        # Adopt same logic as joblib
        n_jobs = n_cpus + 1 + n_jobs
    if n_jobs > n_cpus:
        # f-string prefix was missing in the original, so the message never interpolated.
        print(f'Setting n_jobs={n_jobs} > n_cpus={n_cpus}, setting n_jobs={n_cpus}')
        n_jobs = n_cpus
    assert 0 < n_jobs <= n_cpus
    return n_jobs


def get_parallel_file_pair_preprocessor(file_pair_preprocessor, n_jobs):
    if n_jobs == 1:
        return file_pair_preprocessor
    n_jobs = get_real_n_jobs(n_jobs)

    @wraps(file_pair_preprocessor)
    def parallel_file_pair_preprocessor(complex_filepath, simple_filepath, output_complex_filepath,
                                        output_simple_filepath):
        temp_complex_filepaths = get_temp_filepaths(n_jobs)
        temp_simple_filepaths = get_temp_filepaths(n_jobs)
        split_file(complex_filepath, temp_complex_filepaths, round_robin=True)
        split_file(simple_filepath, temp_simple_filepaths, round_robin=True)
        preprocessed_temp_complex_filepaths = get_temp_filepaths(n_jobs)
        preprocessed_temp_simple_filepaths = get_temp_filepaths(n_jobs)
        tasks = [
            delayed(file_pair_preprocessor)(*paths)
            for paths in zip(temp_complex_filepaths, temp_simple_filepaths,
                             preprocessed_temp_complex_filepaths, preprocessed_temp_simple_filepaths)
        ]
        Parallel(n_jobs=n_jobs)(tasks)
        merge_files(preprocessed_temp_complex_filepaths, output_complex_filepath, round_robin=True)
        merge_files(preprocessed_temp_simple_filepaths, output_simple_filepath, round_robin=True)
        delete_files(temp_complex_filepaths)
        delete_files(temp_simple_filepaths)
        delete_files(preprocessed_temp_complex_filepaths)
        delete_files(preprocessed_temp_simple_filepaths)

    return parallel_file_pair_preprocessor


def word_shuffle(words, max_swap=3):
    noise = torch.rand(len(words)).mul_(max_swap)
    permutation = torch.arange(len(words)).float().add_(noise).sort()[1]
    return [words[i] for i in permutation]


def word_dropout(words, dropout_prob=0.1):
    keep = torch.rand(len(words))
    dropped_out_words = [word for i, word in enumerate(words) if keep[i] > dropout_prob]
    if len(dropped_out_words) == 0:
        return [words[random.randint(0, len(words) - 1)]]
    return dropped_out_words


def word_blank(words, blank_prob=0.1):
    keep = torch.rand(len(words))
    return [word if keep[i] > blank_prob else '<BLANK>' for i, word in enumerate(words)]


def add_noise(sentence):
    words = to_words(sentence)
    words = word_shuffle(words, max_swap=3)
    words = word_dropout(words, dropout_prob=0.1)
    words = word_blank(words, blank_prob=0.1)
    return ' '.join(words)
5,957
34.676647
116
py
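A small usage sketch for the noising helpers in the record above; the sentence is made up, and the functions are assumed to be imported from access.preprocess.

# Assumes word_shuffle, word_dropout, word_blank from the record above are in scope.
words = 'the quick brown fox jumps over the lazy dog'.split()
print(word_shuffle(words, max_swap=3))        # tokens permuted within a small local window
print(word_dropout(words, dropout_prob=0.1))  # ~10% of tokens dropped, never all of them
print(word_blank(words, blank_prob=0.1))      # ~10% of tokens replaced by '<BLANK>'
# add_noise(sentence) chains all three, tokenizing with access.text.to_words first.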
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/setup.py
""" Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py To create the package for pypi. 1. Change the version in __init__.py, setup.py as well as docs/source/conf.py. 2. Unpin specific versions from setup.py (like isort). 2. Commit these changes with the message: "Release: VERSION" 3. Add a tag in git to mark the release: "git tag VERSION -m'Adds tag VERSION for pypi' " Push the tag to git: git push --tags origin master 4. Build both the sources and the wheel. Do not change anything in setup.py between creating the wheel and the source distribution (obviously). For the wheel, run: "python setup.py bdist_wheel" in the top level directory. (this will build a wheel for the python version you use to build it). For the sources, run: "python setup.py sdist" You should now have a /dist directory with both .whl and .tar.gz source versions. 5. Check that everything looks correct by uploading the package to the pypi test server: twine upload dist/* -r pypitest (pypi suggest using twine as other methods upload files via plaintext.) You may have to specify the repository url, use the following command then: twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/ Check that you can install it in a virtualenv by running: pip install -i https://testpypi.python.org/pypi transformers 6. Upload the final version to actual pypi: twine upload dist/* -r pypi 7. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory. 8. Add the release version to docs/source/_static/js/custom.js and .circleci/deploy.sh 9. Update README.md to redirect to correct documentation. """ import shutil from pathlib import Path from setuptools import find_packages, setup # Remove stale transformers.egg-info directory to avoid https://github.com/pypa/pip/issues/5466 stale_egg_info = Path(__file__).parent / "transformers.egg-info" if stale_egg_info.exists(): print( ( "Warning: {} exists.\n\n" "If you recently updated transformers to 3.0 or later, this is expected,\n" "but it may prevent transformers from installing in editable mode.\n\n" "This directory is automatically generated by Python's packaging tools.\n" "I will remove it now.\n\n" "See https://github.com/pypa/pip/issues/5466 for details.\n" ).format(stale_egg_info) ) shutil.rmtree(stale_egg_info) extras = {} extras["mecab"] = ["mecab-python3<1"] extras["sklearn"] = ["scikit-learn"] # keras2onnx and onnxconverter-common version is specific through a commit until 1.7.0 lands on pypi extras["tf"] = [ "tensorflow", "onnxconverter-common", "keras2onnx" # "onnxconverter-common @ git+git://github.com/microsoft/onnxconverter-common.git@f64ca15989b6dc95a1f3507ff6e4c395ba12dff5#egg=onnxconverter-common", # "keras2onnx @ git+git://github.com/onnx/keras-onnx.git@cbdc75cb950b16db7f0a67be96a278f8d2953b48#egg=keras2onnx" ] extras["tf-cpu"] = [ "tensorflow-cpu", "onnxconverter-common", "keras2onnx" # "onnxconverter-common @ git+git://github.com/microsoft/onnxconverter-common.git@f64ca15989b6dc95a1f3507ff6e4c395ba12dff5#egg=onnxconverter-common", # "keras2onnx @ git+git://github.com/onnx/keras-onnx.git@cbdc75cb950b16db7f0a67be96a278f8d2953b48#egg=keras2onnx" ] extras["torch"] = ["torch"] extras["serving"] = ["pydantic", "uvicorn", "fastapi", "starlette"] extras["all"] = extras["serving"] + ["tensorflow", "torch"] extras["testing"] = ["pytest", "pytest-xdist", "timeout-decorator", "psutil"] # sphinx-rtd-theme==0.5.0 introduced big changes in the style. 
extras["docs"] = ["recommonmark", "sphinx", "sphinx-markdown-tables", "sphinx-rtd-theme==0.4.3", "sphinx-copybutton"] extras["quality"] = [ "black", "isort", # "isort @ git+git://github.com/timothycrosley/isort.git@e63ae06ec7d70b06df9e528357650281a3d3ec22#egg=isort", "flake8", ] extras["dev"] = extras["testing"] + extras["quality"] + ["mecab-python3<1", "scikit-learn", "tensorflow", "torch"] setup( name="transformers", version="3.0.2", author="Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Sam Shleifer, Patrick von Platen, Google AI Language Team Authors, Open AI team Authors, Facebook AI Authors, Carnegie Mellon University Authors", author_email="thomas@huggingface.co", description="State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch", long_description=open("README.md", "r", encoding="utf-8").read(), long_description_content_type="text/markdown", keywords="NLP deep learning transformer pytorch tensorflow BERT GPT GPT-2 google openai CMU", license="Apache", url="https://github.com/huggingface/transformers", package_dir={"": "src"}, packages=find_packages("src"), install_requires=[ "numpy", "tokenizers == 0.8.1.rc1", # dataclasses for Python versions that don't have it "dataclasses;python_version<'3.7'", # utilities from PyPA to e.g. compare versions "packaging", # filesystem locks e.g. to prevent parallel downloads "filelock", # for downloading models over HTTPS "requests", # progress bars in model download and training scripts "tqdm >= 4.27", # for OpenAI GPT "regex != 2019.12.17", # for XLNet "sentencepiece != 0.1.92", # for XLM "sacremoses", ], extras_require=extras, entry_points={ "console_scripts": ["transformers-cli=transformers.commands.transformers_cli:main"] }, python_requires=">=3.6.0", classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Topic :: Scientific/Engineering :: Artificial Intelligence", ], )
6,260
39.921569
217
py
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/hubconf.py
import os
import sys


SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoModelWithLMHead,
    AutoTokenizer,
    add_start_docstrings,
)


dependencies = ["torch", "numpy", "tokenizers", "filelock", "requests", "tqdm", "regex", "sentencepiece", "sacremoses"]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    r"""
    # Using torch.hub !
    import torch

    config = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased')  # Download configuration from S3 and cache.
    config = torch.hub.load('huggingface/transformers', 'config', './test/bert_saved_model/')  # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')`
    config = torch.hub.load('huggingface/transformers', 'config', './test/bert_saved_model/my_configuration.json')
    config = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased', output_attention=True, foo=False)
    assert config.output_attention == True
    config, unused_kwargs = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased', output_attention=True, foo=False, return_unused_kwargs=True)
    assert config.output_attention == True
    assert unused_kwargs == {'foo': False}
    """

    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    r"""
    # Using torch.hub !
    import torch

    tokenizer = torch.hub.load('huggingface/transformers', 'tokenizer', 'bert-base-uncased')  # Download vocabulary from S3 and cache.
    tokenizer = torch.hub.load('huggingface/transformers', 'tokenizer', './test/bert_saved_model/')  # E.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`
    """

    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    r"""
    # Using torch.hub !
    import torch

    model = torch.hub.load('huggingface/transformers', 'model', 'bert-base-uncased')  # Download model and configuration from S3 and cache.
    model = torch.hub.load('huggingface/transformers', 'model', './test/bert_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
    model = torch.hub.load('huggingface/transformers', 'model', 'bert-base-uncased', output_attention=True)  # Update configuration during loading
    assert model.config.output_attention == True
    # Loading from a TF checkpoint file instead of a PyTorch model (slower)
    config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
    model = torch.hub.load('huggingface/transformers', 'model', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
    """

    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelWithLMHead.__doc__)
def modelWithLMHead(*args, **kwargs):
    r"""
    # Using torch.hub !
    import torch

    model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', 'bert-base-uncased')  # Download model and configuration from S3 and cache.
    model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', './test/bert_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
    model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', 'bert-base-uncased', output_attention=True)  # Update configuration during loading
    assert model.config.output_attention == True
    # Loading from a TF checkpoint file instead of a PyTorch model (slower)
    config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
    model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
    """

    return AutoModelWithLMHead.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    r"""
    # Using torch.hub !
    import torch

    model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'bert-base-uncased')  # Download model and configuration from S3 and cache.
    model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './test/bert_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
    model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'bert-base-uncased', output_attention=True)  # Update configuration during loading
    assert model.config.output_attention == True
    # Loading from a TF checkpoint file instead of a PyTorch model (slower)
    config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
    model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
    """

    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    r"""
    # Using torch.hub !
    import torch

    model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'bert-base-uncased')  # Download model and configuration from S3 and cache.
    model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './test/bert_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
    model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'bert-base-uncased', output_attention=True)  # Update configuration during loading
    assert model.config.output_attention == True
    # Loading from a TF checkpoint file instead of a PyTorch model (slower)
    config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
    model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
    """

    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
6,612
50.664063
189
py
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/xla_spawn.py
"""
A simple launcher script for TPU training

Inspired by https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py

::
    >>> python xla_spawn.py --num_cores=NUM_CORES_YOU_HAVE
               YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other
               arguments of your training script)

"""


import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """
    Helper function parsing the command line options
    @retval ArgumentParser
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
1,913
25.219178
108
py
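xla_spawn.py above only requires that the target training script expose a module-level `_mp_fn`: the launcher imports the script as a module, patches `sys.argv` (appending `--tpu_num_cores`), and hands `_mp_fn` to `xmp.spawn`. A minimal sketch of a compatible script, with a hypothetical file name and a stubbed-out training body:

# dummy_trainer.py (hypothetical): the smallest layout xla_spawn.py can launch.
import sys


def main():
    # A real script would parse sys.argv here (argparse also sees the injected --tpu_num_cores flag)
    # and run the actual training loop.
    print("launched with args:", sys.argv[1:])


def _mp_fn(index):
    # Called once per TPU process by torch_xla's xmp.spawn; `index` is the process ordinal.
    main()


if __name__ == "__main__":
    main()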
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/lightning_base.py
import argparse import logging import os import random from pathlib import Path from typing import Any, Dict import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.utilities import rank_zero_info, rank_zero_only from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, get_linear_schedule_with_warmup, ) logger = logging.getLogger(__name__) MODEL_MODES = { "base": AutoModel, "sequence-classification": AutoModelForSequenceClassification, "question-answering": AutoModelForQuestionAnswering, "pretraining": AutoModelForPreTraining, "token-classification": AutoModelForTokenClassification, "language-modeling": AutoModelWithLMHead, "summarization": AutoModelForSeq2SeqLM, "translation": AutoModelForSeq2SeqLM, } def set_seed(args: argparse.Namespace): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.gpus > 0: torch.cuda.manual_seed_all(args.seed) class BaseTransformer(pl.LightningModule): def __init__( self, hparams: argparse.Namespace, num_labels=None, mode="base", config=None, tokenizer=None, model=None, **config_kwargs ): """Initialize a model, tokenizer and config.""" super().__init__() self.hparams = hparams # TODO: move to self.save_hyperparameters() self.step_count = 0 self.tfmr_ckpts = {} self.output_dir = Path(self.hparams.output_dir) cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: self.config = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, **({"num_labels": num_labels} if num_labels is not None else {}), cache_dir=cache_dir, **config_kwargs, ) else: self.config: PretrainedConfig = config if tokenizer is None: self.tokenizer = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, cache_dir=cache_dir, ) else: self.tokenizer: PreTrainedTokenizer = tokenizer self.model_type = MODEL_MODES[mode] if model is None: self.model = self.model_type.from_pretrained( self.hparams.model_name_or_path, from_tf=bool(".ckpt" in self.hparams.model_name_or_path), config=self.config, cache_dir=cache_dir, ) else: self.model = model def load_hf_checkpoint(self, *args, **kwargs): self.model = self.model_type.from_pretrained(*args, **kwargs) def configure_optimizers(self): "Prepare optimizer and schedule (linear warmup and decay)" model = self.model no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": self.hparams.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon) self.opt = optimizer return [optimizer] def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, second_order_closure=None): if self.trainer.use_tpu: xm.optimizer_step(optimizer) else: optimizer.step() optimizer.zero_grad() self.lr_scheduler.step() # By default, PL will only step every epoch. 
lrs = {f"lr_group_{i}": lr for i, lr in enumerate(self.lr_scheduler.get_lr())} self.logger.log_metrics(lrs) def test_step(self, batch, batch_nb): return self.validation_step(batch, batch_nb) def test_epoch_end(self, outputs): return self.validation_end(outputs) def train_dataloader(self): train_batch_size = self.hparams.train_batch_size dataloader = self.load_dataset("train", train_batch_size) t_total = ( (len(dataloader.dataset) // (train_batch_size * max(1, self.hparams.n_gpu))) // self.hparams.gradient_accumulation_steps * float(self.hparams.num_train_epochs) ) scheduler = get_linear_schedule_with_warmup( self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=t_total ) self.lr_scheduler = scheduler return dataloader def val_dataloader(self): return self.load_dataset("dev", self.hparams.eval_batch_size) def test_dataloader(self): return self.load_dataset("test", self.hparams.eval_batch_size) def _feature_file(self, mode): return os.path.join( self.hparams.data_dir, "cached_{}_{}_{}".format( mode, list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(), str(self.hparams.max_seq_length), ), ) @pl.utilities.rank_zero_only def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None: save_path = self.output_dir.joinpath("best_tfmr") save_path.mkdir(exist_ok=True) self.model.config.save_step = self.step_count self.model.save_pretrained(save_path) self.tokenizer.save_pretrained(save_path) self.tfmr_ckpts[self.step_count] = save_path @staticmethod def add_model_specific_args(parser, root_dir): parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3", ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader") parser.add_argument( "--num_train_epochs", default=3, type=int, help="Total number of training epochs to perform." 
) parser.add_argument("--train_batch_size", default=32, type=int) parser.add_argument("--eval_batch_size", default=32, type=int) class LoggingCallback(pl.Callback): @rank_zero_only def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): rank_zero_info("***** Validation results *****") metrics = trainer.callback_metrics # Log results for key in sorted(metrics): if key not in ["log", "progress_bar"]: rank_zero_info("{} = {}\n".format(key, str(metrics[key]))) @rank_zero_only def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): logger.info("***** Test results *****") metrics = trainer.callback_metrics # Log and save results to file output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt") with open(output_test_results_file, "w") as writer: for key in sorted(metrics): if key not in ["log", "progress_bar"]: logger.info("{} = {}\n".format(key, str(metrics[key]))) writer.write("{} = {}\n".format(key, str(metrics[key]))) def add_generic_args(parser, root_dir) -> None: # TODO(SS): allow all pl args? parser = pl.Trainer.add_argparse_args(parser) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html", ) parser.add_argument("--fast_dev_run", action="store_true") parser.add_argument("--gpus", type=int, default=1) parser.add_argument("--n_tpu_cores", type=int, default=0) parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.") parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument("--resume_from_checkpoint", type=str, default=None) parser.add_argument("--val_check_interval", default=1.0, type=float) def generic_train( model: BaseTransformer, args: argparse.Namespace, early_stopping_callback=False, logger=True, # can pass WandbLogger() here extra_callbacks=[], checkpoint_callback=None, logging_callback=None, **extra_train_kwargs ): # init model set_seed(args) odir = Path(model.hparams.output_dir) odir.mkdir(exist_ok=True) if checkpoint_callback is None: checkpoint_callback = pl.callbacks.ModelCheckpoint( filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1 ) if logging_callback is None: logging_callback = LoggingCallback() train_params = {} if args.fp16: train_params["use_amp"] = args.fp16 train_params["amp_level"] = args.fp16_opt_level if args.n_tpu_cores > 0: global xm import torch_xla.core.xla_model as xm train_params["num_tpu_cores"] = args.n_tpu_cores train_params["gpus"] = 0 if args.gpus > 1: train_params["distributed_backend"] = "ddp" trainer = pl.Trainer( logger=logger, accumulate_grad_batches=args.gradient_accumulation_steps, gpus=args.gpus, max_epochs=args.num_train_epochs, 
early_stop_callback=early_stopping_callback, gradient_clip_val=args.max_grad_norm, checkpoint_callback=checkpoint_callback, callbacks=[logging_callback] + extra_callbacks, fast_dev_run=args.fast_dev_run, val_check_interval=args.val_check_interval, weights_summary=None, resume_from_checkpoint=args.resume_from_checkpoint, **train_params, ) if args.do_train: trainer.fit(model) trainer.logger.log_hyperparams(args) trainer.logger.save() return trainer
12,332
35.92515
118
py
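lightning_base.py above is meant to be driven by a task-specific script: subclass `BaseTransformer`, implement the hooks it calls (`load_dataset`, the Lightning step methods, a `forward` that delegates to `self.model`), then build the argument parser with `add_generic_args` and `add_model_specific_args` and hand everything to `generic_train`. A skeleton sketch under those assumptions; the class and file are hypothetical and `load_dataset` is deliberately left unimplemented:

# Hypothetical task module showing how the pieces of lightning_base.py are intended to fit together.
import argparse
import os

from lightning_base import BaseTransformer, add_generic_args, generic_train


class MyTaskTransformer(BaseTransformer):
    mode = "sequence-classification"  # picks AutoModelForSequenceClassification from MODEL_MODES

    def __init__(self, hparams):
        super().__init__(hparams, num_labels=2, mode=self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        loss = self(**batch)[0]  # models return the loss first when labels are supplied
        return {"loss": loss}

    def validation_step(self, batch, batch_idx):
        return self.training_step(batch, batch_idx)

    def load_dataset(self, mode, batch_size):
        # train_dataloader / val_dataloader / test_dataloader in BaseTransformer call this hook.
        raise NotImplementedError("return a DataLoader of dicts of model inputs for the given split")


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    MyTaskTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    model = MyTaskTransformer(args)
    generic_train(model, args)


if __name__ == "__main__":
    main()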
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/longform-qa/eli5_app.py
import faiss import nlp import numpy as np import torch from elasticsearch import Elasticsearch import streamlit as st import transformers from eli5_utils import ( embed_questions_for_retrieval, make_qa_s2s_model, qa_s2s_generate, query_es_index, query_qa_dense_index, ) from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer MODEL_TYPE = "bart" LOAD_DENSE_INDEX = True @st.cache(allow_output_mutation=True) def load_models(): if LOAD_DENSE_INDEX: qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased") qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0") _ = qar_model.eval() else: qar_tokenizer, qar_model = (None, None) if MODEL_TYPE == "bart": s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5") s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0") save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth") s2s_model.load_state_dict(save_dict["model"]) _ = s2s_model.eval() else: s2s_tokenizer, s2s_model = make_qa_s2s_model( model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0" ) return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model) @st.cache(allow_output_mutation=True) def load_indexes(): if LOAD_DENSE_INDEX: faiss_res = faiss.StandardGpuResources() wiki40b_passages = nlp.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"] wiki40b_passage_reps = np.memmap( "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat", dtype="float32", mode="r", shape=(wiki40b_passages.num_rows, 128), ) wiki40b_index_flat = faiss.IndexFlatIP(128) wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat) wiki40b_gpu_index_flat.add(wiki40b_passage_reps) # TODO fix for larger GPU else: wiki40b_passages, wiki40b_gpu_index_flat = (None, None) es_client = Elasticsearch([{"host": "localhost", "port": "9200"}]) return (wiki40b_passages, wiki40b_gpu_index_flat, es_client) @st.cache(allow_output_mutation=True) def load_train_data(): eli5 = nlp.load_dataset("eli5", name="LFQA_reddit") eli5_train = eli5["train_eli5"] eli5_train_q_reps = np.memmap( "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128) ) eli5_train_q_index = faiss.IndexFlatIP(128) eli5_train_q_index.add(eli5_train_q_reps) return (eli5_train, eli5_train_q_index) passages, gpu_dense_index, es_client = load_indexes() qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models() eli5_train, eli5_train_q_index = load_train_data() def find_nearest_training(question, n_results=10): q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model) D, I = eli5_train_q_index.search(q_rep, n_results) nn_examples = [eli5_train[int(i)] for i in I[0]] return nn_examples def make_support(question, source="wiki40b", method="dense", n_results=10): if source == "none": support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), []) else: if method == "dense": support_doc, hit_lst = query_qa_dense_index( question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results ) else: support_doc, hit_lst = query_es_index( question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results, ) support_list = [ (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst ] question_doc = "question: {} context: {}".format(question, support_doc) return question_doc, support_list @st.cache(hash_funcs={torch.Tensor: (lambda _: None), 
transformers.tokenization_bart.BartTokenizer: (lambda _: None)}) def answer_question( question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8 ): with torch.no_grad(): answer = qa_s2s_generate( question_doc, s2s_model, s2s_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024, device="cuda:0", )[0] return (answer, support_list) st.title("Long Form Question Answering with ELI5") # Start sidebar header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>" header_full = """ <html> <head> <style> .img-container { padding-left: 90px; padding-right: 90px; padding-top: 50px; padding-bottom: 50px; background-color: #f0f3f9; } </style> </head> <body> <span class="img-container"> <!-- Inline parent element --> %s </span> </body> </html> """ % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia description = """ This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html). First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset, a pre-processed fixed snapshot of Wikipedia. """ st.sidebar.markdown(description, unsafe_allow_html=True) action_list = [ "Answer the question", "View the retrieved document only", "View the most similar ELI5 question and answer", "Show me everything, please!", ] demo_options = st.sidebar.checkbox("Demo options") if demo_options: action_st = st.sidebar.selectbox("", action_list, index=3,) action = action_list.index(action_st) show_type = st.sidebar.selectbox("", ["Show full text of passages", "Show passage section titles"], index=0,) show_passages = show_type == "Show full text of passages" else: action = 3 show_passages = True retrieval_options = st.sidebar.checkbox("Retrieval options") if retrieval_options: retriever_info = """ ### Information retriever options The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs. The answer is then generated by sequence to sequence model which takes the question and retrieved document as input. """ st.sidebar.markdown(retriever_info) wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"]) index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"]) else: wiki_source = "wiki40b" index_type = "dense" sampled = "beam" n_beams = 2 min_len = 64 max_len = 256 top_p = None temp = None generate_options = st.sidebar.checkbox("Generation options") if generate_options: generate_info = """ ### Answer generation options The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large) weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with **beam** search, or **sample** from the decoder's output probabilities. 
""" st.sidebar.markdown(generate_info) sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"]) min_len = st.sidebar.slider( "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) max_len = st.sidebar.slider( "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: top_p = st.sidebar.slider( "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) temp = st.sidebar.slider( "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) n_beams = None # start main text questions_list = [ "<MY QUESTION>", "How do people make chocolate?", "Why do we get a fever when we are sick?", "How can different animals perceive different colors?", "What is natural language processing?", "What's the best way to treat a sunburn?", "What exactly are vitamins ?", "How does nuclear energy provide electricity?", "What's the difference between viruses and bacteria?", "Why are flutes classified as woodwinds when most of them are made out of metal ?", "Why do people like drinking coffee even though it tastes so bad?", "What happens when wine ages? How does it make the wine taste better?", "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?", "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?", "How does New Zealand have so many large bird predators?", ] question_s = st.selectbox( "What would you like to ask? ---- select <MY QUESTION> to enter a new query", questions_list, index=1, ) if question_s == "<MY QUESTION>": question = st.text_input("Enter your question here:", "") else: question = question_s if st.button("Show me!"): if action in [0, 1, 3]: if index_type == "mixed": _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10) _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10) support_list = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] support_list = support_list[:10] question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list]) else: question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: answer, support_list = answer_question( question_doc, s2s_model, s2s_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == "sampled"), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("### The model generated answer is:") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:") for i, res in enumerate(support_list): wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_")) sec_titles = res[1].strip() if sec_titles == "": sections = "[{}]({})".format(res[0], wiki_url) else: sec_list = sec_titles.split(" & ") sections = " & ".join( ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list] ) st.markdown( "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i 
+ 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True ) if action in [2, 3]: nn_train_list = find_nearest_training(question) train_exple = nn_train_list[0] st.markdown( "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"]) ) answers_st = [ "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""])) for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"])) if i == 0 or sc > 2 ] st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st))) disclaimer = """ --- **Disclaimer** *The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system. Evaluating biases of such a model and ensuring factual generations are still very much open research problems. Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.* """ st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
13,298
38.936937
159
py
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/longform-qa/eli5_utils.py
import functools import math import os # noqa: F401 from random import choice, randint from time import time import faiss # noqa: F401 import nlp # noqa: F401 import numpy as np import pandas as pd import torch import torch.utils.checkpoint as checkpoint from elasticsearch import Elasticsearch # noqa: F401 from elasticsearch.helpers import bulk, streaming_bulk # noqa: F401 from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler from tqdm import tqdm from transformers import AdamW, AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer, get_linear_schedule_with_warmup pd.set_option("display.max_colwidth", None) ############### # Sparse index ############### def make_es_index_snippets(es_client, passages_dset, index_name="english_wiki_kilt_snippets_100w"): index_config = { "settings": { "number_of_shards": 1, "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}}, }, "mappings": { "properties": { "article_title": {"type": "text", "analyzer": "standard", "similarity": "BM25"}, "section_title": {"type": "text", "analyzer": "standard", "similarity": "BM25"}, "passage_text": {"type": "text", "analyzer": "standard", "similarity": "BM25"}, } }, } es_client.indices.create(index=index_name, body=index_config) number_of_docs = passages_dset.num_rows progress = tqdm(unit="docs", total=number_of_docs) successes = 0 def passage_generator(): for passage in passages_dset: yield passage # create the ES index for ok, action in streaming_bulk(client=es_client, index=index_name, actions=passage_generator(),): progress.update(1) successes += ok print("Indexed %d documents" % (successes,)) def query_es_index(question, es_client, index_name="english_wiki_kilt_snippets_100w", n_results=10, min_length=20): q = question.lower() banned = ["how", "why", "what", "where", "which", "do", "does", "is", "?", "eli5", "eli5:"] q = " ".join([w for w in q.split() if w not in banned]) response = es_client.search( index=index_name, body={ "query": { "multi_match": { "query": q, "fields": ["article_title", "section_title", "passage_text^2"], "type": "cross_fields", } }, "size": 2 * n_results, }, ) hits = response["hits"]["hits"] support_doc = "<P> " + " <P> ".join([hit["_source"]["passage_text"] for hit in hits]) res_list = [dict([(k, hit["_source"][k]) for k in hit["_source"] if k != "passage_text"]) for hit in hits] for r, hit in zip(res_list, hits): r["passage_id"] = hit["_id"] r["score"] = hit["_score"] r["passage_text"] = hit["_source"]["passage_text"] res_list = [res for res in res_list if len(res["passage_text"].split()) > min_length][:n_results] return support_doc, res_list ############### # ELI5 retriever training ############### class ELI5DatasetQARetriver(Dataset): def __init__(self, examples_array, extra_answer_threshold=3, min_answer_length=64, training=True, n_samples=None): self.data = examples_array self.answer_thres = extra_answer_threshold self.min_length = min_answer_length self.training = training self.n_samples = self.data.num_rows if n_samples is None else n_samples def __len__(self): return self.n_samples def make_example(self, idx): example = self.data[idx] question = example["title"] if self.training: answers = [a for i, (a, sc) in enumerate(zip(example["answers"]["text"], example["answers"]["score"]))] answer_tab = choice(answers).split(" ") start_idx = randint(0, max(0, len(answer_tab) - self.min_length)) answer_span = " ".join(answer_tab[start_idx:]) else: answer_span = example["answers"]["text"][0] return (question, answer_span) def 
__getitem__(self, idx): return self.make_example(idx % self.data.num_rows) class RetrievalQAEmbedder(torch.nn.Module): def __init__(self, sent_encoder, dim): super(RetrievalQAEmbedder, self).__init__() self.sent_encoder = sent_encoder self.output_dim = 128 self.project_q = torch.nn.Linear(dim, self.output_dim, bias=False) self.project_a = torch.nn.Linear(dim, self.output_dim, bias=False) self.ce_loss = torch.nn.CrossEntropyLoss(reduction="mean") def embed_sentences_checkpointed(self, input_ids, attention_mask, checkpoint_batch_size=-1): # reproduces BERT forward pass with checkpointing if checkpoint_batch_size < 0 or input_ids.shape[0] < checkpoint_batch_size: return self.sent_encoder(input_ids, attention_mask=attention_mask)[1] else: # prepare implicit variables device = input_ids.device input_shape = input_ids.size() token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) head_mask = [None] * self.sent_encoder.config.num_hidden_layers extended_attention_mask: torch.Tensor = self.sent_encoder.get_extended_attention_mask( attention_mask, input_shape, device ) # define function for checkpointing def partial_encode(*inputs): encoder_outputs = self.sent_encoder.encoder(inputs[0], attention_mask=inputs[1], head_mask=head_mask,) sequence_output = encoder_outputs[0] pooled_output = self.sent_encoder.pooler(sequence_output) return pooled_output # run embedding layer on everything at once embedding_output = self.sent_encoder.embeddings( input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, inputs_embeds=None ) # run encoding and pooling on one mini-batch at a time pooled_output_list = [] for b in range(math.ceil(input_ids.shape[0] / checkpoint_batch_size)): b_embedding_output = embedding_output[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size] b_attention_mask = extended_attention_mask[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size] pooled_output = checkpoint.checkpoint(partial_encode, b_embedding_output, b_attention_mask) pooled_output_list.append(pooled_output) return torch.cat(pooled_output_list, dim=0) def embed_questions(self, q_ids, q_mask, checkpoint_batch_size=-1): q_reps = self.embed_sentences_checkpointed(q_ids, q_mask, checkpoint_batch_size) return self.project_q(q_reps) def embed_answers(self, a_ids, a_mask, checkpoint_batch_size=-1): a_reps = self.embed_sentences_checkpointed(a_ids, a_mask, checkpoint_batch_size) return self.project_a(a_reps) def forward(self, q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=-1): device = q_ids.device q_reps = self.embed_questions(q_ids, q_mask, checkpoint_batch_size) a_reps = self.embed_answers(a_ids, a_mask, checkpoint_batch_size) compare_scores = torch.mm(q_reps, a_reps.t()) loss_qa = self.ce_loss(compare_scores, torch.arange(compare_scores.shape[1]).to(device)) loss_aq = self.ce_loss(compare_scores.t(), torch.arange(compare_scores.shape[0]).to(device)) loss = (loss_qa + loss_aq) / 2 return loss def make_qa_retriever_model(model_name="google/bert_uncased_L-8_H-512_A-8", from_file=None, device="cuda:0"): tokenizer = AutoTokenizer.from_pretrained(model_name) bert_model = AutoModel.from_pretrained(model_name).to(device) # run bert_model on a dummy batch to get output dimension d_ids = torch.LongTensor( [[bert_model.config.bos_token_id if bert_model.config.bos_token_id is not None else 1]] ).to(device) d_mask = torch.LongTensor([[1]]).to(device) sent_dim = bert_model(d_ids, attention_mask=d_mask)[1].shape[-1] qa_embedder = RetrievalQAEmbedder(bert_model, sent_dim).to(device) if from_file is 
not None: param_dict = torch.load(from_file) # has model weights, optimizer, and scheduler states qa_embedder.load_state_dict(param_dict["model"]) return tokenizer, qa_embedder def make_qa_retriever_batch(qa_list, tokenizer, max_len=64, device="cuda:0"): q_ls = [q for q, a in qa_list] a_ls = [a for q, a in qa_list] q_toks = tokenizer(q_ls, max_length=max_len, padding="max_length", truncation=True) q_ids, q_mask = ( torch.LongTensor(q_toks["input_ids"]).to(device), torch.LongTensor(q_toks["attention_mask"]).to(device), ) a_toks = tokenizer(a_ls, max_length=max_len, padding="max_length", truncation=True) a_ids, a_mask = ( torch.LongTensor(a_toks["input_ids"]).to(device), torch.LongTensor(a_toks["attention_mask"]).to(device), ) return (q_ids, q_mask, a_ids, a_mask) def train_qa_retriever_epoch(model, dataset, tokenizer, optimizer, scheduler, args, e=0): model.train() # make iterator train_sampler = RandomSampler(dataset) model_collate_fn = functools.partial( make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" ) data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn) epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True) # accumulate loss since last print loc_steps = 0 loc_loss = 0.0 st_time = time() for step, batch in enumerate(epoch_iterator): q_ids, q_mask, a_ids, a_mask = batch pre_loss = model(q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=args.checkpoint_batch_size) loss = pre_loss.sum() # optimizer loss.backward() optimizer.step() scheduler.step() model.zero_grad() # some printing within the epoch loc_loss += loss.item() loc_steps += 1 if step % args.print_freq == 0 or step == 1: print( "{:2d} {:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format( e, step, len(dataset) // args.batch_size, loc_loss / loc_steps, time() - st_time, ) ) loc_loss = 0 loc_steps = 0 def train_qa_retriever_joint_epoch(model, dataset_list, tokenizer, optimizer, scheduler, args, e=0): model.train() model_collate_fn = functools.partial( make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" ) # make iterator train_samplers = [RandomSampler(dataset) for dataset in dataset_list] data_loaders = [ DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn) for dataset, train_sampler in zip(dataset_list, train_samplers) ] iterators = [iter(dloader) for dloader in data_loaders] joint_iter = zip(*iterators) # accumulate loss since last print loc_steps = 0 loc_loss = 0.0 st_time = time() for step, (batches,) in enumerate(zip(joint_iter)): for batch in batches: q_ids, q_mask, a_ids, a_mask = batch loss = model(q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=args.checkpoint_batch_size) # optimizer loss.backward() optimizer.step() scheduler.step() model.zero_grad() # some printing within the epoch loc_loss += loss.item() loc_steps += 1 if step % args.print_freq == 0: print( "{:2d} {:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format( e, step, len(dataset_list[0]) // args.batch_size, loc_loss / loc_steps, time() - st_time, ) ) loc_loss = 0 loc_steps = 0 def evaluate_qa_retriever(model, dataset, tokenizer, args): model.eval() # make iterator eval_sampler = SequentialSampler(dataset) model_collate_fn = functools.partial( make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" ) data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=eval_sampler, collate_fn=model_collate_fn) epoch_iterator = tqdm(data_loader, 
desc="Iteration", disable=True) tot_loss = 0.0 with torch.no_grad(): for step, batch in enumerate(epoch_iterator): q_ids, q_mask, a_ids, a_mask = batch loss = model(q_ids, q_mask, a_ids, a_mask) tot_loss += loss.item() return tot_loss / (step + 1) def train_qa_retriever(qar_model, qar_tokenizer, qar_train_dset, qar_valid_dset, qar_args): qar_optimizer = AdamW(qar_model.parameters(), lr=qar_args.learning_rate, eps=1e-8) qar_scheduler = get_linear_schedule_with_warmup( qar_optimizer, num_warmup_steps=100, num_training_steps=(qar_args.num_epochs + 1) * math.ceil(len(qar_train_dset) / qar_args.batch_size), ) for e in range(qar_args.num_epochs): train_qa_retriever_epoch(qar_model, qar_train_dset, qar_tokenizer, qar_optimizer, qar_scheduler, qar_args, e) m_save_dict = { "model": qar_model.state_dict(), "optimizer": qar_optimizer.state_dict(), "scheduler": qar_scheduler.state_dict(), } print("Saving model {}".format(qar_args.model_save_name)) torch.save(m_save_dict, "{}_{}.pth".format(qar_args.model_save_name, e)) eval_loss = evaluate_qa_retriever(qar_model, qar_valid_dset, qar_tokenizer, qar_args) print("Evaluation loss epoch {:4d}: {:.3f}".format(e, eval_loss)) ############### # ELI5 seq2seq model training ############### class ELI5DatasetS2S(Dataset): def __init__( self, examples_array, make_doc_fun=None, extra_answer_threshold=3, document_cache=None, training=True ): self.training = training self.data = examples_array self.make_doc_function = make_doc_fun self.document_cache = {} if document_cache is None else document_cache assert not (make_doc_fun is None and document_cache is None) # make index of specific question-answer pairs from multi-answers if self.training: self.qa_id_list = [ (i, j) for i, qa in enumerate(self.data) for j, (a, sc) in enumerate(zip(qa["answers"]["text"], qa["answers"]["score"])) if j == 0 or sc >= extra_answer_threshold ] else: self.qa_id_list = [(i, 0) for i in range(self.data.num_rows)] def __len__(self): return len(self.qa_id_list) def make_example(self, idx): i, j = self.qa_id_list[idx] example = self.data[i] question = example["title"] + " " + example["selftext"] answer = example["answers"]["text"][j] q_id = example["q_id"] if self.make_doc_function is not None: self.document_cache[q_id] = self.document_cache.get(q_id, self.make_doc_function(example["title"])) document = self.document_cache[q_id] in_st = "question: {} context: {}".format( question.lower().replace(" --t--", "").strip(), document.lower().strip(), ) out_st = answer return (in_st, out_st) def __getitem__(self, idx): return self.make_example(idx) def make_qa_s2s_model(model_name="facebook/bart-large", from_file=None, device="cuda:0"): tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device) if from_file is not None: param_dict = torch.load(from_file) # has model weights, optimizer, and scheduler states model.load_state_dict(param_dict["model"]) return tokenizer, model def make_qa_s2s_batch(qa_list, tokenizer, max_len=64, max_a_len=360, device="cuda:0"): q_ls = [q for q, a in qa_list] a_ls = [a for q, a in qa_list] q_toks = tokenizer(q_ls, max_length=max_len, padding="max_length", truncation=True) q_ids, q_mask = ( torch.LongTensor(q_toks["input_ids"]).to(device), torch.LongTensor(q_toks["attention_mask"]).to(device), ) a_toks = tokenizer(a_ls, max_length=min(max_len, max_a_len), padding="max_length", truncation=True) a_ids, a_mask = ( torch.LongTensor(a_toks["input_ids"]).to(device), 
torch.LongTensor(a_toks["attention_mask"]).to(device), ) lm_labels = a_ids[:, 1:].contiguous().clone() lm_labels[a_mask[:, 1:].contiguous() == 0] = -100 model_inputs = { "input_ids": q_ids, "attention_mask": q_mask, "decoder_input_ids": a_ids[:, :-1].contiguous(), "lm_labels": lm_labels, } return model_inputs def train_qa_s2s_epoch(model, dataset, tokenizer, optimizer, scheduler, args, e=0, curriculum=False): model.train() # make iterator if curriculum: train_sampler = SequentialSampler(dataset) else: train_sampler = RandomSampler(dataset) model_collate_fn = functools.partial( make_qa_s2s_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" ) data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn) epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True) # accumulate loss since last print loc_steps = 0 loc_loss = 0.0 st_time = time() for step, batch_inputs in enumerate(epoch_iterator): pre_loss = model(**batch_inputs)[0] loss = pre_loss.sum() / pre_loss.shape[0] loss.backward() # optimizer if step % args.backward_freq == 0: optimizer.step() scheduler.step() model.zero_grad() # some printing within the epoch loc_loss += loss.item() loc_steps += 1 if step % args.print_freq == 0 or step == 1: print( "{:2d} {:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format( e, step, len(dataset) // args.batch_size, loc_loss / loc_steps, time() - st_time, ) ) loc_loss = 0 loc_steps = 0 def eval_qa_s2s_epoch(model, dataset, tokenizer, args): model.eval() # make iterator train_sampler = SequentialSampler(dataset) model_collate_fn = functools.partial( make_qa_s2s_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" ) data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn) epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True) # accumulate loss since last print loc_steps = 0 loc_loss = 0.0 st_time = time() with torch.no_grad(): for step, batch_inputs in enumerate(epoch_iterator): pre_loss = model(**batch_inputs)[0] loss = pre_loss.sum() / pre_loss.shape[0] loc_loss += loss.item() loc_steps += 1 if step % args.print_freq == 0: print( "{:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format( step, len(dataset) // args.batch_size, loc_loss / loc_steps, time() - st_time, ) ) print("Total \t L: {:.3f} \t -- {:.3f}".format(loc_loss / loc_steps, time() - st_time,)) def train_qa_s2s(qa_s2s_model, qa_s2s_tokenizer, s2s_train_dset, s2s_valid_dset, s2s_args): s2s_optimizer = AdamW(qa_s2s_model.parameters(), lr=s2s_args.learning_rate, eps=1e-8) s2s_scheduler = get_linear_schedule_with_warmup( s2s_optimizer, num_warmup_steps=400, num_training_steps=(s2s_args.num_epochs + 1) * math.ceil(len(s2s_train_dset) / s2s_args.batch_size), ) for e in range(s2s_args.num_epochs): train_qa_s2s_epoch( qa_s2s_model, s2s_train_dset, qa_s2s_tokenizer, s2s_optimizer, s2s_scheduler, s2s_args, e, curriculum=(e == 0), ) m_save_dict = { "model": qa_s2s_model.state_dict(), "optimizer": s2s_optimizer.state_dict(), "scheduler": s2s_scheduler.state_dict(), } print("Saving model {}".format(s2s_args.model_save_name)) eval_qa_s2s_epoch(qa_s2s_model, s2s_valid_dset, qa_s2s_tokenizer, s2s_args) torch.save(m_save_dict, "{}_{}.pth".format(s2s_args.model_save_name, e)) # generate answer from input "question: ... context: <p> ..." 
def qa_s2s_generate( question_doc, qa_s2s_model, qa_s2s_tokenizer, num_answers=1, num_beams=None, min_len=64, max_len=256, do_sample=False, temp=1.0, top_p=None, top_k=None, max_input_length=512, device="cuda:0", ): model_inputs = make_qa_s2s_batch([(question_doc, "A")], qa_s2s_tokenizer, max_input_length, device=device,) n_beams = num_answers if num_beams is None else max(num_beams, num_answers) generated_ids = qa_s2s_model.generate( input_ids=model_inputs["input_ids"], attention_mask=model_inputs["attention_mask"], min_length=min_len, max_length=max_len, do_sample=do_sample, early_stopping=True, num_beams=1 if do_sample else n_beams, temperature=temp, top_k=top_k, top_p=top_p, eos_token_id=qa_s2s_tokenizer.eos_token_id, no_repeat_ngram_size=3, num_return_sequences=num_answers, decoder_start_token_id=qa_s2s_tokenizer.bos_token_id, ) return [qa_s2s_tokenizer.decode(ans_ids, skip_special_tokens=True).strip() for ans_ids in generated_ids] ############### # ELI5-trained retrieval model usage ############### def embed_passages_for_retrieval(passages, tokenizer, qa_embedder, max_length=128, device="cuda:0"): a_toks = tokenizer(passages, max_length=max_length, padding="max_length", truncation=True) a_ids, a_mask = ( torch.LongTensor(a_toks["input_ids"]).to(device), torch.LongTensor(a_toks["attention_mask"]).to(device), ) with torch.no_grad(): a_reps = qa_embedder.embed_answers(a_ids, a_mask).cpu().type(torch.float) return a_reps.numpy() def embed_questions_for_retrieval(q_ls, tokenizer, qa_embedder, device="cuda:0"): q_toks = tokenizer(q_ls, max_length=128, padding="max_length", truncation=True) q_ids, q_mask = ( torch.LongTensor(q_toks["input_ids"]).to(device), torch.LongTensor(q_toks["attention_mask"]).to(device), ) with torch.no_grad(): q_reps = qa_embedder.embed_questions(q_ids, q_mask).cpu().type(torch.float) return q_reps.numpy() def make_qa_dense_index( qa_embedder, tokenizer, passages_dset, batch_size=512, max_length=128, index_name="kilt_passages_reps.dat", dtype="float32", device="cuda:0", ): st_time = time() fp = np.memmap(index_name, dtype=dtype, mode="w+", shape=(passages_dset.num_rows, 128)) n_batches = math.ceil(passages_dset.num_rows / batch_size) for i in range(n_batches): passages = [p for p in passages_dset[i * batch_size : (i + 1) * batch_size]["passage_text"]] reps = embed_passages_for_retrieval(passages, tokenizer, qa_embedder, max_length, device) fp[i * batch_size : (i + 1) * batch_size] = reps if i % 50 == 0: print(i, time() - st_time) def evaluate_retriever(qa_list, retriever_func, scoring_func, n_ret=10, verbose=False): total_retriever_time = 0.0 total_retriever_score = 0.0 st_time = time() for i, (question, answer) in enumerate(qa_list): r_time = time() retrieved_passages = retriever_func(question, n_ret) total_retriever_time += time() - r_time total_retriever_score += scoring_func(retrieved_passages, answer) if verbose and ((i + 1) % 500 == 0 or i <= 1): print( "{:03d}: S-{:.4f} T-{:.4f} | {:.2f}".format( i + 1, total_retriever_score / (i + 1), total_retriever_time / (i + 1), time() - st_time ) ) return {"idf_recall": total_retriever_score / (i + 1), "retrieval_time": total_retriever_time / (i + 1)} # build a support document for the question out of Wikipedia snippets def query_qa_dense_index( question, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10, min_length=20, device="cuda:0" ): q_rep = embed_questions_for_retrieval([question], tokenizer, qa_embedder, device=device) D, I = wiki_index.search(q_rep, 2 * n_results) res_passages = 
[wiki_passages[int(i)] for i in I[0]] support_doc = "<P> " + " <P> ".join([p["passage_text"] for p in res_passages]) res_list = [dict([(k, p[k]) for k in wiki_passages.column_names]) for p in res_passages] res_list = [res for res in res_list if len(res["passage_text"].split()) > min_length][:n_results] for r, sc in zip(res_list, D[0]): r["score"] = float(sc) return support_doc, res_list def batch_query_qa_dense_index(questions, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10): q_rep = embed_questions_for_retrieval(questions, tokenizer, qa_embedder) D, I = wiki_index.search(q_rep, n_results) res_passages_lst = [[wiki_passages[int(i)] for i in i_lst] for i_lst in I] support_doc_lst = [ "<P> " + " <P> ".join([p["passage_text"] for p in res_passages]) for res_passages in res_passages_lst ] all_res_lists = [] for (res_passages, dl) in zip(res_passages_lst, D): res_list = [dict([(k, p[k]) for k in wiki_passages.column_names]) for p in res_passages] for r, sc in zip(res_list, dl): r["score"] = float(sc) all_res_lists += [res_list[:]] return support_doc_lst, all_res_lists # find nearest neighbors of an answer or declarative text in Wikipedia snippets def query_qa_dense_index_nn(passage, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10, min_length=20): a_rep = embed_passages_for_retrieval([passage], tokenizer, qa_embedder) D, I = wiki_index.search(a_rep, 2 * n_results) res_passages = [wiki_passages[int(i)] for i in I[0]] support_doc = "<P> " + " <P> ".join([p["passage_text"] for p in res_passages]) res_list = [dict([(k, p[k]) for k in wiki_passages.column_names]) for p in res_passages] res_list = [res for res in res_list if len(res["passage_text"].split()) > min_length][:n_results] for r, sc, i in zip(res_list, D[0], I[0]): r["passage_id"] = int(i) r["score"] = float(sc) return support_doc, res_list def batch_query_qa_dense_index_nn(passages, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10): a_reps = embed_passages_for_retrieval(passages, tokenizer, qa_embedder) D, I = wiki_index.search(a_reps, n_results) res_passages_lst = [[wiki_passages[int(i)] for i in i_lst] for i_lst in I] support_doc_lst = [ "<P> " + " <P> ".join([p["passage_text"] for p in res_passages]) for res_passages in res_passages_lst ] all_res_lists = [] for (res_passages, dl, il) in zip(res_passages_lst, D, I): res_list = [dict([(k, p[k]) for k in wiki_passages.column_names]) for p in res_passages] for r, sc, i in zip(res_list, dl, il): r["passage_id"] = int(i) r["score"] = float(sc) all_res_lists += [res_list[:]] return support_doc_lst, all_res_lists
27,779
41.477064
119
py
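The helpers in eli5_utils.py above compose into a retrieve-then-generate loop: embed the question, search a dense FAISS index of Wikipedia passage embeddings, and feed the retrieved support document to the seq2seq generator. A minimal usage sketch, assuming trained retriever and seq2seq checkpoints plus a precomputed passage-embedding memmap already exist locally (the file names below are placeholders):

# Sketch only: the checkpoint and index file names are placeholders, not artifacts shipped with the repo.
import faiss
import nlp
import numpy as np

from eli5_utils import make_qa_retriever_model, make_qa_s2s_model, qa_s2s_generate, query_qa_dense_index

# question/passage embedder and seq2seq answer generator, restored from saved training checkpoints
qar_tokenizer, qar_model = make_qa_retriever_model(from_file="retriever_checkpoint.pth", device="cuda:0")
s2s_tokenizer, s2s_model = make_qa_s2s_model(from_file="s2s_checkpoint.pth", device="cuda:0")

# Wikipedia snippets plus a flat inner-product index over their 128-d passage embeddings
wiki_passages = nlp.load_dataset("wiki_snippets", name="wiki40b_en_100_0")["train"]
passage_reps = np.memmap(
    "wiki40b_passages_reps.dat", dtype="float32", mode="r", shape=(wiki_passages.num_rows, 128)
)  # built beforehand with make_qa_dense_index
wiki_index = faiss.IndexFlatIP(128)
wiki_index.add(passage_reps)

question = "How do people make chocolate?"
support_doc, hits = query_qa_dense_index(question, qar_model, qar_tokenizer, wiki_passages, wiki_index, n_results=10)
answer = qa_s2s_generate("question: {} context: {}".format(question, support_doc), s2s_model, s2s_tokenizer)[0]
print(answer)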
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/question-answering/run_squad.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for question-answering on SQuAD (DistilBERT, Bert, XLM, XLNet).""" import argparse import glob import logging import os import random import timeit import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from transformers import ( MODEL_FOR_QUESTION_ANSWERING_MAPPING, WEIGHTS_NAME, AdamW, AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, get_linear_schedule_with_warmup, squad_convert_examples_to_features, ) from transformers.data.metrics.squad_metrics import ( compute_predictions_log_probs, compute_predictions_logits, squad_evaluate, ) from transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter logger = logging.getLogger(__name__) MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def to_list(tensor): return tensor.detach().cpu().tolist() def train(args, train_dataset, model, tokenizer): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # Check if saved optimizer or scheduler states exist if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile( os.path.join(args.model_name_or_path, "scheduler.pt") ): # Load in optimizer and scheduler states 
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True ) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 1 epochs_trained = 0 steps_trained_in_current_epoch = 0 # Check if continuing training from a checkpoint if os.path.exists(args.model_name_or_path): try: # set global_step to gobal_step of last saved checkpoint from model path checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0] global_step = int(checkpoint_suffix) epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps) logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", global_step) logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) except ValueError: logger.info(" Starting fine-tuning.") tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0] ) # Added here for reproductibility set_seed(args) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue model.train() batch = tuple(t.to(args.device) for t in batch) inputs = { "input_ids": batch[0], "attention_mask": batch[1], "token_type_ids": batch[2], "start_positions": batch[3], "end_positions": batch[4], } if args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": batch[5], "p_mask": batch[6]}) if args.version_2_with_negative: inputs.update({"is_impossible": batch[7]}) if hasattr(model, "config") and hasattr(model.config, "lang2id"): inputs.update( {"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)} ) outputs = model(**inputs) # model outputs 
are always tuple in transformers (see doc) loss = outputs[0] if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 # Log metrics if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Only evaluate when single GPU otherwise metrics may not average well if args.local_rank == -1 and args.evaluate_during_training: results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar("eval_{}".format(key), value, global_step) tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step) tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step) logging_loss = tr_loss # Save model checkpoint if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) # Take care of distributed/parallel training model_to_save = model.module if hasattr(model, "module") else model model_to_save.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu evaluate if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Eval! 
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) all_results = [] start_time = timeit.default_timer() for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = { "input_ids": batch[0], "attention_mask": batch[1], "token_type_ids": batch[2], } if args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] feature_indices = batch[3] # XLNet and XLM use more arguments for their predictions if args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": batch[4], "p_mask": batch[5]}) # for lang_id-sensitive xlm models if hasattr(model, "config") and hasattr(model.config, "lang2id"): inputs.update( {"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)} ) outputs = model(**inputs) for i, feature_index in enumerate(feature_indices): eval_feature = features[feature_index.item()] unique_id = int(eval_feature.unique_id) output = [to_list(output[i]) for output in outputs] # Some models (XLNet, XLM) use 5 arguments for their predictions, while the other "simpler" # models only use two. if len(output) >= 5: start_logits = output[0] start_top_index = output[1] end_logits = output[2] end_top_index = output[3] cls_logits = output[4] result = SquadResult( unique_id, start_logits, end_logits, start_top_index=start_top_index, end_top_index=end_top_index, cls_logits=cls_logits, ) else: start_logits, end_logits = output result = SquadResult(unique_id, start_logits, end_logits) all_results.append(result) evalTime = timeit.default_timer() - start_time logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset)) # Compute predictions output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix)) output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix)) if args.version_2_with_negative: output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix)) else: output_null_log_odds_file = None # XLNet and XLM use a more complex post-processing procedure if args.model_type in ["xlnet", "xlm"]: start_n_top = model.config.start_n_top if hasattr(model, "config") else model.module.config.start_n_top end_n_top = model.config.end_n_top if hasattr(model, "config") else model.module.config.end_n_top predictions = compute_predictions_log_probs( examples, features, all_results, args.n_best_size, args.max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, start_n_top, end_n_top, args.version_2_with_negative, tokenizer, args.verbose_logging, ) else: predictions = compute_predictions_logits( examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.verbose_logging, args.version_2_with_negative, args.null_score_diff_threshold, tokenizer, ) # Compute the F1 and exact scores. 
results = squad_evaluate(examples, predictions) return results def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): if args.local_rank not in [-1, 0] and not evaluate: # Make sure only the first process in distributed training process the dataset, and the others will use the cache torch.distributed.barrier() # Load data features from cache or dataset file input_dir = args.data_dir if args.data_dir else "." cached_features_file = os.path.join( input_dir, "cached_{}_{}_{}".format( "dev" if evaluate else "train", list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), ), ) # Init features and dataset from cache if it exists if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features_and_dataset = torch.load(cached_features_file) features, dataset, examples = ( features_and_dataset["features"], features_and_dataset["dataset"], features_and_dataset["examples"], ) else: logger.info("Creating features from dataset file at %s", input_dir) if not args.data_dir and ((evaluate and not args.predict_file) or (not evaluate and not args.train_file)): try: import tensorflow_datasets as tfds except ImportError: raise ImportError("If not data_dir is specified, tensorflow_datasets needs to be installed.") if args.version_2_with_negative: logger.warn("tensorflow_datasets does not handle version 2 of SQuAD.") tfds_examples = tfds.load("squad") examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate) else: processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor() if evaluate: examples = processor.get_dev_examples(args.data_dir, filename=args.predict_file) else: examples = processor.get_train_examples(args.data_dir, filename=args.train_file) features, dataset = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=not evaluate, return_dataset="pt", threads=args.threads, ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save({"features": features, "dataset": dataset, "examples": examples}, cached_features_file) if args.local_rank == 0 and not evaluate: # Make sure only the first process in distributed training process the dataset, and the others will use the cache torch.distributed.barrier() if output_examples: return dataset, examples, features return dataset def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_TYPES), ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.", ) # Other parameters parser.add_argument( "--data_dir", default=None, type=str, help="The input data dir. Should contain the .json files for the task." + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--train_file", default=None, type=str, help="The input training file. 
If a data dir is specified, will look for the file there" + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--predict_file", default=None, type=str, help="The input evaluation file. If a data dir is specified, will look for the file there" + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3", ) parser.add_argument( "--version_2_with_negative", action="store_true", help="If true, the SQuAD examples contain some that do not have an answer.", ) parser.add_argument( "--null_score_diff_threshold", type=float, default=0.0, help="If null_score - best_non_null is greater than the threshold predict null.", ) parser.add_argument( "--max_seq_length", default=384, type=int, help="The maximum total input sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.", ) parser.add_argument( "--doc_stride", default=128, type=int, help="When splitting up a long document into chunks, how much stride to take between chunks.", ) parser.add_argument( "--max_query_length", default=64, type=int, help="The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length.", ) parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument( "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step." ) parser.add_argument( "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model." ) parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument( "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation." ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform." ) parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.", ) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument( "--n_best_size", default=20, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json output file.", ) parser.add_argument( "--max_answer_length", default=30, type=int, help="The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another.", ) parser.add_argument( "--verbose_logging", action="store_true", help="If true, all of the warnings related to data processing will be printed. " "A number of warnings are expected for a normal SQuAD evaluation.", ) parser.add_argument( "--lang_id", default=0, type=int, help="language id of input for language-specific xlm models (see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)", ) parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.") parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.") parser.add_argument( "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available") parser.add_argument( "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html", ) parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.") parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.") parser.add_argument("--threads", type=int, default=1, help="multiple threads for converting example to features") args = parser.parse_args() if args.doc_stride >= args.max_seq_length - args.max_query_length: logger.warning( "WARNING - You've set a doc stride which may be superior to the document length in some " "examples. This could result in errors when building features from the examples. Please reduce the doc " "stride or increase the maximum length to ensure the features are correctly built." ) if ( os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir ): raise ValueError( "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( args.output_dir ) ) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: # Make sure only the first process in distributed training will download model & vocab torch.distributed.barrier() args.model_type = args.model_type.lower() config = AutoConfig.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None, ) tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None, ) model = AutoModelForQuestionAnswering.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None, ) if args.local_rank == 0: # Make sure only the first process in distributed training will download model & vocab torch.distributed.barrier() model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set. # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will # remove the need for this code, but it is still valid. if args.fp16: try: import apex apex.amp.register_half_function(torch, "einsum") except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") # Training if args.do_train: train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Save the trained model and the tokenizer if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` # Take care of distributed/parallel training model_to_save = model.module if hasattr(model, "module") else model model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Load a trained model and vocabulary that you have fine-tuned model = AutoModelForQuestionAnswering.from_pretrained(args.output_dir) # , force_download=True) tokenizer = AutoTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) model.to(args.device) # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory results = {} if args.do_eval and args.local_rank in [-1, 0]: if args.do_train: logger.info("Loading checkpoints saved during training for evaluation") checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) ) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs else: logger.info("Loading checkpoint %s for evaluation", args.model_name_or_path) checkpoints = [args.model_name_or_path] logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: # Reload the model global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" model = AutoModelForQuestionAnswering.from_pretrained(checkpoint) # , force_download=True) model.to(args.device) # Evaluate result = evaluate(args, model, tokenizer, prefix=global_step) result = dict((k + ("_{}".format(global_step) if global_step else ""), v) for k, v in result.items()) results.update(result) logger.info("Results: {}".format(results)) return results if __name__ == "__main__": main()
34,081
40.462287
126
py
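A minimal, self-contained sketch of the optimizer/scheduler pattern used in the run_squad.py entry above: parameters are split into two groups so that biases and LayerNorm weights are excluded from weight decay, then paired with a linear warmup schedule. The helper name build_optimizer_and_scheduler and the default hyperparameters are illustrative additions, not part of the original script.

import torch
from transformers import AdamW, get_linear_schedule_with_warmup

def build_optimizer_and_scheduler(model, lr=5e-5, weight_decay=0.01, warmup_steps=0, total_steps=1000):
    # Exclude biases and LayerNorm weights from weight decay, as the training loop above does.
    no_decay = ["bias", "LayerNorm.weight"]
    grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]
    optimizer = AdamW(grouped_parameters, lr=lr, eps=1e-8)
    # Linear warmup followed by linear decay over the total number of optimization steps.
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps)
    return optimizer, scheduler

Splitting the parameter groups this way keeps regularization off the bias and normalization terms, which is the convention the script follows for all supported model types.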
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/token-classification/run_pl_ner.py
import argparse import glob import logging import os import numpy as np import torch from seqeval.metrics import f1_score, precision_score, recall_score from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader, TensorDataset from lightning_base import BaseTransformer, add_generic_args, generic_train from utils_ner import convert_examples_to_features, get_labels, read_examples_from_file logger = logging.getLogger(__name__) class NERTransformer(BaseTransformer): """ A training module for NER. See BaseTransformer for the core options. """ mode = "token-classification" def __init__(self, hparams): self.labels = get_labels(hparams.labels) num_labels = len(self.labels) self.pad_token_label_id = CrossEntropyLoss().ignore_index super().__init__(hparams, num_labels, self.mode) def forward(self, **inputs): return self.model(**inputs) def training_step(self, batch, batch_num): "Compute loss and log." inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if self.config.model_type in ["bert", "xlnet"] else None ) # XLM and RoBERTa don"t use token_type_ids outputs = self(**inputs) loss = outputs[0] tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def prepare_data(self): "Called to initialize data. Use the call to construct features" args = self.hparams for mode in ["train", "dev", "test"]: cached_features_file = self._feature_file(mode) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) examples = read_examples_from_file(args.data_dir, mode) features = convert_examples_to_features( examples, self.labels, args.max_seq_length, self.tokenizer, cls_token_at_end=bool(self.config.model_type in ["xlnet"]), cls_token=self.tokenizer.cls_token, cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0, sep_token=self.tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(self.config.model_type in ["xlnet"]), pad_token=self.tokenizer.pad_token_id, pad_token_segment_id=self.tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, ) logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) def load_dataset(self, mode, batch_size): "Load datasets. Called after prepare data." 
cached_features_file = self._feature_file(mode) logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) if features[0].token_type_ids is not None: all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) else: all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long) # HACK(we will not use this anymore soon) all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long) return DataLoader( TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size ) def validation_step(self, batch, batch_nb): "Compute validation" inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if self.config.model_type in ["bert", "xlnet"] else None ) # XLM and RoBERTa don"t use token_type_ids outputs = self(**inputs) tmp_eval_loss, logits = outputs[:2] preds = logits.detach().cpu().numpy() out_label_ids = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def _eval_end(self, outputs): "Evaluation called for both Val and Test" val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean() preds = np.concatenate([x["pred"] for x in outputs], axis=0) preds = np.argmax(preds, axis=2) out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0) label_map = {i: label for i, label in enumerate(self.labels)} out_label_list = [[] for _ in range(out_label_ids.shape[0])] preds_list = [[] for _ in range(out_label_ids.shape[0])] for i in range(out_label_ids.shape[0]): for j in range(out_label_ids.shape[1]): if out_label_ids[i, j] != self.pad_token_label_id: out_label_list[i].append(label_map[out_label_ids[i][j]]) preds_list[i].append(label_map[preds[i][j]]) results = { "val_loss": val_loss_mean, "precision": precision_score(out_label_list, preds_list), "recall": recall_score(out_label_list, preds_list), "f1": f1_score(out_label_list, preds_list), } ret = {k: v for k, v in results.items()} ret["log"] = results return ret, preds_list, out_label_list def validation_epoch_end(self, outputs): # when stable ret, preds, targets = self._eval_end(outputs) logs = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def test_epoch_end(self, outputs): # updating to test_epoch_end instead of deprecated test_end ret, predictions, targets = self._eval_end(outputs) # Converting to the dict required by pl # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\ # pytorch_lightning/trainer/logging.py#L139 logs = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def add_model_specific_args(parser, root_dir): # Add NER specific options BaseTransformer.add_model_specific_args(parser, root_dir) parser.add_argument( "--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument( "--labels", default="", type=str, help="Path to a file containing all labels. 
If not specified, CoNLL-2003 labels are used.", ) parser.add_argument( "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.", ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) return parser if __name__ == "__main__": parser = argparse.ArgumentParser() add_generic_args(parser, os.getcwd()) parser = NERTransformer.add_model_specific_args(parser, os.getcwd()) args = parser.parse_args() model = NERTransformer(args) trainer = generic_train(model, args) if args.do_predict: # See https://github.com/huggingface/transformers/issues/3159 # pl use this format to create a checkpoint: # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\ # /pytorch_lightning/callbacks/model_checkpoint.py#L169 checkpoints = list(sorted(glob.glob(os.path.join(args.output_dir, "checkpointepoch=*.ckpt"), recursive=True))) model = model.load_from_checkpoint(checkpoints[-1]) trainer.test(model)
8,769
42.20197
118
py
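A minimal sketch of how the cached InputFeatures consumed by the run_pl_ner.py entry above can be turned into a DataLoader, mirroring its load_dataset step. It assumes each feature exposes input_ids, attention_mask and label_ids lists of equal length; the helper name features_to_dataloader is illustrative.

import torch
from torch.utils.data import DataLoader, TensorDataset

def features_to_dataloader(features, batch_size=32):
    # Stack the per-example lists into long tensors, one tensor per model input.
    input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
    return DataLoader(TensorDataset(input_ids, attention_mask, label_ids), batch_size=batch_size)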
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/token-classification/run_ner.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for named entity recognition on CoNLL-2003. """ import logging import os import sys from dataclasses import dataclass, field from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import f1_score, precision_score, recall_score from torch import nn from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from utils_ner import NerDataset, Split, get_labels logger = logging.getLogger(__name__) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."}) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ data_dir: str = field( metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} ) labels: Optional[str] = field( default=None, metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."}, ) max_seq_length: int = field( default=128, metadata={ "help": "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, ) logger.info("Training/evaluation parameters %s", training_args) # Set seed set_seed(training_args.seed) # Prepare CONLL-2003 task labels = get_labels(data_args.labels) label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)} num_labels = len(labels) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label=label_map, label2id={label: i for i, label in enumerate(labels)}, cache_dir=model_args.cache_dir, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, ) model = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, ) # Get datasets train_dataset = ( NerDataset( data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, ) if training_args.do_train else None ) eval_dataset = ( NerDataset( data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, ) if training_args.do_eval else None ) def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]: preds = np.argmax(predictions, axis=2) batch_size, seq_len = preds.shape out_label_list = [[] for _ in range(batch_size)] preds_list = [[] for _ in range(batch_size)] for i in range(batch_size): for j in range(seq_len): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]]) preds_list[i].append(label_map[preds[i][j]]) return preds_list, out_label_list def compute_metrics(p: EvalPrediction) -> Dict: preds_list, out_label_list = align_predictions(p.predictions, p.label_ids) return { "precision": precision_score(out_label_list, preds_list), "recall": recall_score(out_label_list, preds_list), "f1": f1_score(out_label_list, preds_list), } # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, ) # Training if 
training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir) # Evaluation results = {} if training_args.do_eval: logger.info("*** Evaluate ***") result = trainer.evaluate() output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt") if trainer.is_world_master(): with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key, value in result.items(): logger.info(" %s = %s", key, value) writer.write("%s = %s\n" % (key, value)) results.update(result) # Predict if training_args.do_predict: test_dataset = NerDataset( data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, ) predictions, label_ids, metrics = trainer.predict(test_dataset) preds_list, _ = align_predictions(predictions, label_ids) output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt") if trainer.is_world_master(): with open(output_test_results_file, "w") as writer: for key, value in metrics.items(): logger.info(" %s = %s", key, value) writer.write("%s = %s\n" % (key, value)) # Save predictions output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt") if trainer.is_world_master(): with open(output_test_predictions_file, "w") as writer: with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f: example_id = 0 for line in f: if line.startswith("-DOCSTART-") or line == "" or line == "\n": writer.write(line) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n" writer.write(output_line) else: logger.warning( "Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0] ) return results def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
11,184
35.672131
133
py
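A minimal sketch of the prediction-alignment step performed by align_predictions in the run_ner.py entry above: per-token argmax predictions are mapped back to label strings, and positions carrying the padding label id (sub-tokens, special tokens, padding) are skipped. The function name decode_token_predictions and the id2label argument are illustrative.

import numpy as np

def decode_token_predictions(logits, label_ids, id2label, ignore_index=-100):
    # logits: (batch, seq_len, num_labels); label_ids: (batch, seq_len) with ignore_index at padded positions.
    preds = np.argmax(logits, axis=2)
    batch_size, seq_len = preds.shape
    pred_labels = [[] for _ in range(batch_size)]
    gold_labels = [[] for _ in range(batch_size)]
    for i in range(batch_size):
        for j in range(seq_len):
            if label_ids[i, j] != ignore_index:
                gold_labels[i].append(id2label[label_ids[i, j]])
                pred_labels[i].append(id2label[preds[i, j]])
    return pred_labels, gold_labels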
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/token-classification/utils_ner.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Named entity recognition fine-tuning: utilities to work with CoNLL-2003 task. """ import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available logger = logging.getLogger(__name__) @dataclass class InputExample: """ A single training/test example for token classification. Args: guid: Unique id for the example. words: list. The words of the sequence. labels: (Optional) list. The labels for each word of the sequence. This should be specified for train and dev examples, but not for test examples. """ guid: str words: List[str] labels: Optional[List[str]] @dataclass class InputFeatures: """ A single set of features of data. Property names are the same names as the corresponding inputs to a model. """ input_ids: List[int] attention_mask: List[int] token_type_ids: Optional[List[int]] = None label_ids: Optional[List[int]] = None class Split(Enum): train = "train" dev = "dev" test = "test" if is_torch_available(): import torch from torch import nn from torch.utils.data.dataset import Dataset class NerDataset(Dataset): """ This will be superseded by a framework-agnostic approach soon. """ features: List[InputFeatures] pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index # Use cross entropy ignore_index as padding label id so that only # real label ids contribute to the loss later. def __init__( self, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train, ): # Load data features from cache or dataset file cached_features_file = os.path.join( data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)), ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lock_path = cached_features_file + ".lock" with FileLock(lock_path): if os.path.exists(cached_features_file) and not overwrite_cache: logger.info(f"Loading features from cached file {cached_features_file}") self.features = torch.load(cached_features_file) else: logger.info(f"Creating features from dataset file at {data_dir}") examples = read_examples_from_file(data_dir, mode) # TODO clean up all this to leverage built-in features of tokenizers self.features = convert_examples_to_features( examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]), # xlnet has a cls token at the end cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, # roberta uses an extra separator b/w pairs of sentences, cf. 
github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805 pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, ) logger.info(f"Saving features into cached file {cached_features_file}") torch.save(self.features, cached_features_file) def __len__(self): return len(self.features) def __getitem__(self, i) -> InputFeatures: return self.features[i] if is_tf_available(): import tensorflow as tf class TFNerDataset: """ This will be superseded by a framework-agnostic approach soon. """ features: List[InputFeatures] pad_token_label_id: int = -1 # Use cross entropy ignore_index as padding label id so that only # real label ids contribute to the loss later. def __init__( self, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train, ): examples = read_examples_from_file(data_dir, mode) # TODO clean up all this to leverage built-in features of tokenizers self.features = convert_examples_to_features( examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]), # xlnet has a cls token at the end cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, # roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805 pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, ) def gen(): for ex in self.features: if ex.token_type_ids is None: yield ( {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label_ids, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label_ids, ) if "token_type_ids" not in tokenizer.model_input_names: self.dataset = tf.data.Dataset.from_generator( gen, ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64), ( {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([None]), ), ) else: self.dataset = tf.data.Dataset.from_generator( gen, ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64), ( { "input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None]), "token_type_ids": tf.TensorShape([None]), }, tf.TensorShape([None]), ), ) def get_dataset(self): return self.dataset def __len__(self): return len(self.features) def __getitem__(self, i) -> InputFeatures: return self.features[i] def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]: if isinstance(mode, Split): mode = mode.value file_path = os.path.join(data_dir, f"{mode}.txt") guid_index = 1 examples = [] with open(file_path, encoding="utf-8") as f: words = [] labels = [] for line in f: if line.startswith("-DOCSTART-") or line == "" or line == "\n": if words: examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels)) guid_index += 1 words = [] labels = [] else: splits = line.split(" ") words.append(splits[0]) if len(splits) > 1: labels.append(splits[-1].replace("\n", "")) else: # Examples could have no label for mode = "test" labels.append("O") if words: examples.append(InputExample(guid=f"{mode}-{guid_index}", 
words=words, labels=labels)) return examples def convert_examples_to_features( examples: List[InputExample], label_list: List[str], max_seq_length: int, tokenizer: PreTrainedTokenizer, cls_token_at_end=False, cls_token="[CLS]", cls_token_segment_id=1, sep_token="[SEP]", sep_token_extra=False, pad_on_left=False, pad_token=0, pad_token_segment_id=0, pad_token_label_id=-100, sequence_a_segment_id=0, mask_padding_with_zero=True, ) -> List[InputFeatures]: """ Loads a data file into a list of `InputFeatures` `cls_token_at_end` define the location of the CLS token: - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP] - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS] `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet) """ # TODO clean up all this to leverage built-in features of tokenizers label_map = {label: i for i, label in enumerate(label_list)} features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10_000 == 0: logger.info("Writing example %d of %d", ex_index, len(examples)) tokens = [] label_ids = [] for word, label in zip(example.words, example.labels): word_tokens = tokenizer.tokenize(word) # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space. if len(word_tokens) > 0: tokens.extend(word_tokens) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1)) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. special_tokens_count = tokenizer.num_special_tokens_to_add() if len(tokens) > max_seq_length - special_tokens_count: tokens = tokens[: (max_seq_length - special_tokens_count)] label_ids = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens += [sep_token] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] segment_ids = [sequence_a_segment_id] * len(tokens) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: tokens = [cls_token] + tokens label_ids = [pad_token_label_id] + label_ids segment_ids = [cls_token_segment_id] + segment_ids input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up to the sequence length. 
padding_length = max_seq_length - len(input_ids) if pad_on_left: input_ids = ([pad_token] * padding_length) + input_ids input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids label_ids = ([pad_token_label_id] * padding_length) + label_ids else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length assert len(label_ids) == max_seq_length if ex_index < 5: logger.info("*** Example ***") logger.info("guid: %s", example.guid) logger.info("tokens: %s", " ".join([str(x) for x in tokens])) logger.info("input_ids: %s", " ".join([str(x) for x in input_ids])) logger.info("input_mask: %s", " ".join([str(x) for x in input_mask])) logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids])) logger.info("label_ids: %s", " ".join([str(x) for x in label_ids])) if "token_type_ids" not in tokenizer.model_input_names: segment_ids = None features.append( InputFeatures( input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids ) ) return features def get_labels(path: str) -> List[str]: if path: with open(path, "r") as f: labels = f.read().splitlines() if "O" not in labels: labels = ["O"] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
15,996
39.092732
160
py
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/bertology/run_bertology.py
#!/usr/bin/env python3 # Copyright 2018 CMU and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Bertology: this script shows how you can explore the internals of the models in the library to: - compute the entropy of the head attentions - compute the importance of each head - prune (remove) the low importance head. Some parts of this script are adapted from the code of Michel et al. (http://arxiv.org/abs/1905.10650) which is available at https://github.com/pmichel31415/are-16-heads-really-better-than-1 """ import argparse import logging import os from datetime import datetime import numpy as np import torch from torch.utils.data import DataLoader, SequentialSampler, Subset from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, GlueDataset, default_data_collator, glue_compute_metrics, glue_output_modes, glue_processors, set_seed, ) logger = logging.getLogger(__name__) def entropy(p): """ Compute the entropy of a probability distribution """ plogp = p * torch.log(p) plogp[p == 0] = 0 return -plogp.sum(dim=-1) def print_2d_tensor(tensor): """ Print a 2D tensor """ logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor)))) for row in range(len(tensor)): if tensor.dtype != torch.long: logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data)) else: logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data)) def compute_heads_importance( args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False ): """ This method shows how to compute: - head attention entropy - head importance scores according to http://arxiv.org/abs/1905.10650 """ # Prepare our tensors n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads head_importance = torch.zeros(n_layers, n_heads).to(args.device) attn_entropy = torch.zeros(n_layers, n_heads).to(args.device) if head_mask is None: head_mask = torch.ones(n_layers, n_heads).to(args.device) head_mask.requires_grad_(requires_grad=True) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: head_mask = None preds = None labels = None tot_tokens = 0.0 for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])): for k, v in inputs.items(): inputs[k] = v.to(args.device) # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) outputs = model(**inputs, head_mask=head_mask) loss, logits, all_attentions = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask if compute_entropy: for layer, attn in enumerate(all_attentions): masked_entropy = entropy(attn.detach()) * inputs["attention_mask"].float().unsqueeze(1) attn_entropy[layer] 
+= masked_entropy.sum(-1).sum(0).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() # Also store our logits/labels if we want to compute metrics afterwards if preds is None: preds = logits.detach().cpu().numpy() labels = inputs["labels"].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) labels = np.append(labels, inputs["labels"].detach().cpu().numpy(), axis=0) tot_tokens += inputs["attention_mask"].float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: exponent = 2 norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent) head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20 if not args.dont_normalize_global_importance: head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print/save matrices np.save(os.path.join(args.output_dir, "attn_entropy.npy"), attn_entropy.detach().cpu().numpy()) np.save(os.path.join(args.output_dir, "head_importance.npy"), head_importance.detach().cpu().numpy()) logger.info("Attention entropies") print_2d_tensor(attn_entropy) logger.info("Head importance scores") print_2d_tensor(head_importance) logger.info("Head ranked by importance scores") head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device) head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange( head_importance.numel(), device=args.device ) head_ranks = head_ranks.view_as(head_importance) print_2d_tensor(head_ranks) return attn_entropy, head_importance, preds, labels def mask_heads(args, model, eval_dataloader): """ This method shows how to mask head (set some heads to zero), to test the effect on the network, based on the head importance scores, as described in Michel et al. 
(http://arxiv.org/abs/1905.10650) """ _, head_importance, preds, labels = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False) preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds) original_score = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name] logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold) new_head_mask = torch.ones_like(head_importance) num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount)) current_score = original_score while current_score >= original_score * args.masking_threshold: head_mask = new_head_mask.clone() # save current head mask # heads from least important to most - keep only not-masked heads head_importance[head_mask == 0.0] = float("Inf") current_heads_to_mask = head_importance.view(-1).sort()[1] if len(current_heads_to_mask) <= num_to_mask: break # mask heads current_heads_to_mask = current_heads_to_mask[:num_to_mask] logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist())) new_head_mask = new_head_mask.view(-1) new_head_mask[current_heads_to_mask] = 0.0 new_head_mask = new_head_mask.view_as(head_mask) new_head_mask = new_head_mask.clone().detach() print_2d_tensor(new_head_mask) # Compute metric and head importance again _, head_importance, preds, labels = compute_heads_importance( args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask ) preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds) current_score = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name] logger.info( "Masking: current score: %f, remaining heads %d (%.1f percents)", current_score, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 100, ) logger.info("Final head mask") print_2d_tensor(head_mask) np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy()) return head_mask def prune_heads(args, model, eval_dataloader, head_mask): """ This method shows how to prune head (remove heads weights) based on the head importance scores as described in Michel et al. 
(http://arxiv.org/abs/1905.10650) """ # Try pruning and test time speedup # Pruning is like masking but we actually remove the masked weights before_time = datetime.now() _, _, preds, labels = compute_heads_importance( args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask ) preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds) score_masking = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name] original_time = datetime.now() - before_time original_num_params = sum(p.numel() for p in model.parameters()) heads_to_prune = dict( (layer, (1 - head_mask[layer].long()).nonzero().squeeze().tolist()) for layer in range(len(head_mask)) ) assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item() model.prune_heads(heads_to_prune) pruned_num_params = sum(p.numel() for p in model.parameters()) before_time = datetime.now() _, _, preds, labels = compute_heads_importance( args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True, ) preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds) score_pruning = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name] new_time = datetime.now() - before_time logger.info( "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)", original_num_params, pruned_num_params, pruned_num_params / original_num_params * 100, ) logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning) logger.info("Pruning: speed ratio (new timing / original timing): %f percents", original_time / new_time * 100) def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.", ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--task_name", default=None, type=str, required=True, help="The name of the task to train selected in the list: " + ", ".join(glue_processors.keys()), ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", ) # Other parameters parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path", ) parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name_or_path", ) parser.add_argument( "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3", ) parser.add_argument( "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances." 
) parser.add_argument( "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory" ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers" ) parser.add_argument( "--dont_normalize_global_importance", action="store_true", help="Don't normalize all importance scores between 0 and 1", ) parser.add_argument( "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy." ) parser.add_argument( "--masking_threshold", default=0.9, type=float, help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).", ) parser.add_argument( "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step." ) parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.") parser.add_argument( "--max_seq_length", default=128, type=int, help="The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, sequences shorter padded.", ) parser.add_argument("--batch_size", default=1, type=int, help="Batch size.") parser.add_argument("--seed", type=int, default=42) parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available") parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.") parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.") args = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) args.device = torch.device("cuda", args.local_rank) args.n_gpu = 1 torch.distributed.init_process_group(backend="nccl") # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1))) # Set seeds set_seed(args.seed) # Prepare GLUE task args.task_name = args.task_name.lower() if args.task_name not in glue_processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = glue_processors[args.task_name]() args.output_mode = glue_output_modes[args.task_name] label_list = processor.get_labels() num_labels = len(label_list) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
config = AutoConfig.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name, output_attentions=True, cache_dir=args.cache_dir, ) tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, cache_dir=args.cache_dir, ) model = AutoModelForSequenceClassification.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir, ) # Distributed and parallel training model.to(args.device) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True ) elif args.n_gpu > 1: model = torch.nn.DataParallel(model) # Print/save training arguments os.makedirs(args.output_dir, exist_ok=True) torch.save(args, os.path.join(args.output_dir, "run_args.bin")) logger.info("Training/evaluation parameters %s", args) # Prepare dataset for the GLUE task eval_dataset = GlueDataset(args, tokenizer=tokenizer, mode="dev") if args.data_subset > 0: eval_dataset = Subset(eval_dataset, list(range(min(args.data_subset, len(eval_dataset))))) eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) eval_dataloader = DataLoader( eval_dataset, sampler=eval_sampler, batch_size=args.batch_size, collate_fn=default_data_collator ) # Compute head entropy and importance score compute_heads_importance(args, model, eval_dataloader) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: head_mask = mask_heads(args, model, eval_dataloader) prune_heads(args, model, eval_dataloader, head_mask) if __name__ == "__main__": main()
18,191
40.158371
118
py
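Editor's note: the bertology script above estimates per-head importance and then either masks heads (head_mask) or removes their weights entirely (prune_heads). Below is a minimal sketch of those two public transformers mechanisms; the bert-base-uncased checkpoint, the toy sentence, and the chosen head indices are illustrative assumptions, not values used by the script.

# Hedged sketch: silence two attention heads with a head_mask, then prune them for real.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_name = "bert-base-uncased"  # assumed checkpoint; any BERT-style encoder should behave the same
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()

inputs = tokenizer("a toy sentence", return_tensors="pt")

# head_mask has shape (num_layers, num_heads); 1.0 keeps a head, 0.0 zeroes its attention weights
head_mask = torch.ones(model.config.num_hidden_layers, model.config.num_attention_heads)
head_mask[0, 0] = 0.0  # mask head 0 of layer 0 (arbitrary choice)
head_mask[3, 5] = 0.0  # mask head 5 of layer 3 (arbitrary choice)

with torch.no_grad():
    masked_logits = model(**inputs, head_mask=head_mask)[0]

# prune_heads removes the corresponding projection weights instead of just masking them
model.prune_heads({0: [0], 3: [5]})
with torch.no_grad():
    pruned_logits = model(**inputs)[0]
print(masked_logits.shape, pruned_logits.shape)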
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/text-classification/run_pl_glue.py
import argparse import glob import logging import os import time import numpy as np import torch from torch.utils.data import DataLoader, TensorDataset from lightning_base import BaseTransformer, add_generic_args, generic_train from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes from transformers import glue_processors as processors from transformers import glue_tasks_num_labels logger = logging.getLogger(__name__) class GLUETransformer(BaseTransformer): mode = "sequence-classification" def __init__(self, hparams): hparams.glue_output_mode = glue_output_modes[hparams.task] num_labels = glue_tasks_num_labels[hparams.task] super().__init__(hparams, num_labels, self.mode) def forward(self, **inputs): return self.model(**inputs) def training_step(self, batch, batch_idx): inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type != "distilbert": inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None outputs = self(**inputs) loss = outputs[0] tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def prepare_data(self): "Called to initialize data. Use the call to construct features" args = self.hparams processor = processors[args.task]() self.labels = processor.get_labels() for mode in ["train", "dev"]: cached_features_file = self._feature_file(mode) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) examples = ( processor.get_dev_examples(args.data_dir) if mode == "dev" else processor.get_train_examples(args.data_dir) ) features = convert_examples_to_features( examples, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode, ) logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) def load_dataset(self, mode, batch_size): "Load datasets. Called after prepare data." 
# We test on dev set to compare to benchmarks without having to submit to GLUE server mode = "dev" if mode == "test" else mode cached_features_file = self._feature_file(mode) logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) if self.hparams.glue_output_mode == "classification": all_labels = torch.tensor([f.label for f in features], dtype=torch.long) elif self.hparams.glue_output_mode == "regression": all_labels = torch.tensor([f.label for f in features], dtype=torch.float) return DataLoader( TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels), batch_size=batch_size, shuffle=True, ) def validation_step(self, batch, batch_idx): inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type != "distilbert": inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None outputs = self(**inputs) tmp_eval_loss, logits = outputs[:2] preds = logits.detach().cpu().numpy() out_label_ids = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def _eval_end(self, outputs) -> tuple: val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item() preds = np.concatenate([x["pred"] for x in outputs], axis=0) if self.hparams.glue_output_mode == "classification": preds = np.argmax(preds, axis=1) elif self.hparams.glue_output_mode == "regression": preds = np.squeeze(preds) out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0) out_label_list = [[] for _ in range(out_label_ids.shape[0])] preds_list = [[] for _ in range(out_label_ids.shape[0])] results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)} ret = {k: v for k, v in results.items()} ret["log"] = results return ret, preds_list, out_label_list def validation_epoch_end(self, outputs: list) -> dict: ret, preds, targets = self._eval_end(outputs) logs = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def test_epoch_end(self, outputs) -> dict: ret, predictions, targets = self._eval_end(outputs) logs = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def add_model_specific_args(parser, root_dir): BaseTransformer.add_model_specific_args(parser, root_dir) parser.add_argument( "--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument( "--task", default="", type=str, required=True, help="The GLUE task to run", ) parser.add_argument( "--data_dir", default=None, type=str, required=True, help="The input data dir. 
Should contain the .tsv files (or other data files) for the task.", ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) return parser if __name__ == "__main__": parser = argparse.ArgumentParser() add_generic_args(parser, os.getcwd()) parser = GLUETransformer.add_model_specific_args(parser, os.getcwd()) args = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: args.output_dir = os.path.join("./results", f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",) os.makedirs(args.output_dir) model = GLUETransformer(args) trainer = generic_train(model, args) # Optionally, predict on dev set and write to output_dir if args.do_predict: checkpoints = list(sorted(glob.glob(os.path.join(args.output_dir, "checkpointepoch=*.ckpt"), recursive=True))) model = model.load_from_checkpoint(checkpoints[-1]) trainer.test(model)
7,757
39.831579
118
py
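Editor's note: the prepare_data step of GLUETransformer above builds cached features with glue_convert_examples_to_features. The snippet below shows that call in isolation on a single hand-written NLI-style example; the sentence pair, label list, and checkpoint name are assumptions made for illustration.

# Hedged sketch: turn one InputExample into padded/truncated model features.
from transformers import AutoTokenizer, InputExample, glue_convert_examples_to_features

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # assumed checkpoint
examples = [
    InputExample(
        guid="dev-1",
        text_a="A man is playing a guitar.",
        text_b="A person plays an instrument.",
        label="entailment",
    )
]
features = glue_convert_examples_to_features(
    examples,
    tokenizer,
    max_length=128,
    label_list=["contradiction", "entailment", "neutral"],
    output_mode="classification",
)
print(features[0].input_ids[:10], features[0].label)  # token ids plus the mapped integer label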
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/text-classification/run_xnli.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning multi-lingual models on XNLI (e.g. Bert, DistilBERT, XLM). Adapted from `examples/text-classification/run_glue.py`""" import argparse import glob import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from transformers import ( WEIGHTS_NAME, AdamW, AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, ) from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import xnli_compute_metrics as compute_metrics from transformers import xnli_output_modes as output_modes from transformers import xnli_processors as processors try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter logger = logging.getLogger(__name__) def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def train(args, train_dataset, model, tokenizer): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # Check if saved optimizer or scheduler states exist if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile( os.path.join(args.model_name_or_path, "scheduler.pt") ): # Load in optimizer and scheduler states optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) if args.fp16: try: from 
apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True ) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 epochs_trained = 0 steps_trained_in_current_epoch = 0 # Check if continuing training from a checkpoint if os.path.exists(args.model_name_or_path): # set global_step to gobal_step of last saved checkpoint from model path global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0]) epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps) logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", global_step) logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0] ) set_seed(args) # Added here for reproductibility for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue model.train() batch = tuple(t.to(args.device) for t in batch) inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert"] else None ) # XLM and DistilBERT don't use segment_ids outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule 
model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if ( args.local_rank == -1 and args.evaluate_during_training ): # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar("eval_{}".format(key), value, global_step) tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step) tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): eval_task_names = (args.task_name,) eval_outputs_dirs = (args.output_dir,) results = {} for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu eval if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Eval! 
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds = None out_label_ids = None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert"] else None ) # XLM and DistilBERT don't use segment_ids outputs = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs["labels"].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0) eval_loss = eval_loss / nb_eval_steps if args.output_mode == "classification": preds = np.argmax(preds, axis=1) else: raise ValueError("No other `output_mode` for XNLI.") result = compute_metrics(eval_task, preds, out_label_ids) results.update(result) output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return results def load_and_cache_examples(args, task, tokenizer, evaluate=False): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache processor = processors[task](language=args.language, train_language=args.train_language) output_mode = output_modes[task] # Load data features from cache or dataset file cached_features_file = os.path.join( args.data_dir, "cached_{}_{}_{}_{}_{}".format( "test" if evaluate else "train", list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), str(task), str(args.train_language if (not evaluate and args.train_language is not None) else args.language), ), ) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) label_list = processor.get_labels() examples = ( processor.get_test_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir) ) features = convert_examples_to_features( examples, tokenizer, max_length=args.max_seq_length, label_list=label_list, output_mode=output_mode, ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0 and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) if output_mode 
== "classification": all_labels = torch.tensor([f.label for f in features], dtype=torch.long) else: raise ValueError("No other `output_mode` for XNLI.") dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels) return dataset def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.", ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--language", default=None, type=str, required=True, help="Evaluation language. Also train language if `train_language` is set to None.", ) parser.add_argument( "--train_language", default=None, type=str, help="Train language if is different of the evaluation language." ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", ) # Other parameters parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3", ) parser.add_argument( "--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the test set.") parser.add_argument( "--evaluate_during_training", action="store_true", help="Rul evaluation during training at each logging step." ) parser.add_argument( "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model." ) parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument( "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation." ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform." ) parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.", ) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.") parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.") parser.add_argument( "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available") parser.add_argument( "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html", ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.") parser.add_argument("--server_port", type=str, default="", help="For distant debugging.") args = parser.parse_args() if ( os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir ): raise ValueError( "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( args.output_dir ) ) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set seed set_seed(args) # Prepare XNLI task args.task_name = "xnli" if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name](language=args.language, train_language=args.train_language) args.output_mode = output_modes[args.task_name] label_list = processor.get_labels() num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab config = AutoConfig.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name, cache_dir=args.cache_dir, ) args.model_type = config.model_type tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir, ) model = AutoModelForSequenceClassification.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir, ) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Load a trained model and vocabulary that you have fine-tuned model = AutoModelForSequenceClassification.from_pretrained(args.output_dir) tokenizer = AutoTokenizer.from_pretrained(args.output_dir) model.to(args.device) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) ) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else "" model = AutoModelForSequenceClassification.from_pretrained(checkpoint) model.to(args.device) result = evaluate(args, model, tokenizer, prefix=prefix) result = dict((k + "_{}".format(global_step), v) for k, v in result.items()) results.update(result) return results if __name__ == "__main__": main()
27,174
42.972492
150
py
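Editor's note: the train() function of run_xnli.py above wires together AdamW with two weight-decay groups, a linear warmup/decay schedule, and gradient clipping. The toy loop below isolates that wiring with a stand-in linear model and a dummy loss; the hyper-parameter values are placeholders rather than values taken from the script.

# Hedged sketch: the decay-group / warmup-schedule / clipping pattern on a tiny stand-in model.
import torch
from transformers import AdamW, get_linear_schedule_with_warmup

model = torch.nn.Linear(8, 3)  # stand-in for the sequence classifier
no_decay = ["bias", "LayerNorm.weight"]
grouped_parameters = [
    {
        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": 0.01,  # assumed value; the script exposes --weight_decay
    },
    {
        "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,
    },
]
optimizer = AdamW(grouped_parameters, lr=5e-5, eps=1e-8)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=100)

for step in range(100):
    loss = model(torch.randn(4, 8)).pow(2).mean()  # dummy loss standing in for the task loss
    loss.backward()
    torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
    optimizer.step()
    scheduler.step()  # one scheduler step per optimizer step, as in the script
    optimizer.zero_grad()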
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/adversarial/run_hans.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for sequence classification on HANS.""" import logging import os from dataclasses import dataclass, field from typing import Dict, List, Optional import numpy as np import torch from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from utils_hans import HansDataset, InputFeatures, hans_processors, hans_tasks_num_labels logger = logging.getLogger(__name__) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ task_name: str = field( metadata={"help": "The name of the task to train selected in the list: " + ", ".join(hans_processors.keys())} ) data_dir: str = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) max_seq_length: int = field( default=128, metadata={ "help": "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) def hans_data_collator(features: List[InputFeatures]) -> Dict[str, torch.Tensor]: """ Data collator that removes the "pairID" key if present. """ batch = default_data_collator(features) _ = batch.pop("pairID", None) return batch def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) model_args, data_args, training_args = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome." 
) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, ) logger.info("Training/evaluation parameters %s", training_args) # Set seed set_seed(training_args.seed) try: num_labels = hans_tasks_num_labels[data_args.task_name] except KeyError: raise ValueError("Task not found: %s" % (data_args.task_name)) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) model = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, ) # Get datasets train_dataset = ( HansDataset( data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, ) if training_args.do_train else None ) eval_dataset = ( HansDataset( data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, evaluate=True, ) if training_args.do_eval else None ) # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=hans_data_collator, ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir) # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") output = trainer.predict(eval_dataset) preds = output.predictions preds = np.argmax(preds, axis=1) pair_ids = [ex.pairID for ex in eval_dataset] output_eval_file = os.path.join(training_args.output_dir, "hans_predictions.txt") label_list = eval_dataset.get_labels() if trainer.is_world_master(): with open(output_eval_file, "w") as writer: writer.write("pairID,gold_label\n") for pid, pred in zip(pair_ids, preds): writer.write("ex" + str(pid) + "," + label_list[int(pred)] + "\n") trainer._log(output.metrics) def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
7,810
32.668103
133
py
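Editor's note: run_hans.py above keeps a pairID on every feature so predictions can be written per example, but drops it from the batch before the forward pass (hans_data_collator). The toy check below, with invented token ids, shows the collate-then-pop idea.

# Hedged sketch: collate dict features, then strip the model-unfriendly "pairID" key.
from transformers import default_data_collator

features = [
    {"input_ids": [101, 2023, 102], "attention_mask": [1, 1, 1], "label": 1, "pairID": 42},
    {"input_ids": [101, 2008, 102], "attention_mask": [1, 1, 1], "label": 0, "pairID": 43},
]
batch = default_data_collator(features)
batch.pop("pairID", None)  # keep it out of model(**batch)
print(sorted(batch.keys()))  # expected: ['attention_mask', 'input_ids', 'labels']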
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/adversarial/utils_hans.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) logger = logging.getLogger(__name__) @dataclass(frozen=True) class InputExample: """ A single training/test example for simple sequence classification. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Only must be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. pairID: (Optional) string. Unique identifier for the pair of sentences. """ guid: str text_a: str text_b: Optional[str] = None label: Optional[str] = None pairID: Optional[str] = None @dataclass(frozen=True) class InputFeatures: """ A single set of features of data. Property names are the same names as the corresponding inputs to a model. Args: input_ids: Indices of input sequence tokens in the vocabulary. attention_mask: Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: Usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded) tokens. token_type_ids: (Optional) Segment token indices to indicate first and second portions of the inputs. Only some models use them. label: (Optional) Label corresponding to the input. Int for classification problems, float for regression problems. pairID: (Optional) Unique identifier for the pair of sentences. """ input_ids: List[int] attention_mask: Optional[List[int]] = None token_type_ids: Optional[List[int]] = None label: Optional[Union[int, float]] = None pairID: Optional[int] = None if is_torch_available(): import torch from torch.utils.data.dataset import Dataset class HansDataset(Dataset): """ This will be superseded by a framework-agnostic approach soon. 
""" features: List[InputFeatures] def __init__( self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str, max_seq_length: Optional[int] = None, overwrite_cache=False, evaluate: bool = False, ): processor = hans_processors[task]() cached_features_file = os.path.join( data_dir, "cached_{}_{}_{}_{}".format( "dev" if evaluate else "train", tokenizer.__class__.__name__, str(max_seq_length), task, ), ) label_list = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) label_list[1], label_list[2] = label_list[2], label_list[1] self.label_list = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lock_path = cached_features_file + ".lock" with FileLock(lock_path): if os.path.exists(cached_features_file) and not overwrite_cache: logger.info(f"Loading features from cached file {cached_features_file}") self.features = torch.load(cached_features_file) else: logger.info(f"Creating features from dataset file at {data_dir}") examples = ( processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir) ) logger.info("Training examples: %s", len(examples)) self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer) logger.info("Saving features into cached file %s", cached_features_file) torch.save(self.features, cached_features_file) def __len__(self): return len(self.features) def __getitem__(self, i) -> InputFeatures: return self.features[i] def get_labels(self): return self.label_list if is_tf_available(): import tensorflow as tf class TFHansDataset: """ This will be superseded by a framework-agnostic approach soon. 
""" features: List[InputFeatures] def __init__( self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str, max_seq_length: Optional[int] = 128, overwrite_cache=False, evaluate: bool = False, ): processor = hans_processors[task]() label_list = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) label_list[1], label_list[2] = label_list[2], label_list[1] self.label_list = label_list examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir) self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer) def gen(): for (ex_index, ex) in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(examples))) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) self.dataset = tf.data.Dataset.from_generator( gen, ( { "example_id": tf.int32, "input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32, }, tf.int64, ), ( { "example_id": tf.TensorShape([]), "input_ids": tf.TensorShape([None, None]), "attention_mask": tf.TensorShape([None, None]), "token_type_ids": tf.TensorShape([None, None]), }, tf.TensorShape([]), ), ) def get_dataset(self): return self.dataset def __len__(self): return len(self.features) def __getitem__(self, i) -> InputFeatures: return self.features[i] def get_labels(self): return self.label_list class HansProcessor(DataProcessor): """Processor for the HANS data set.""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev") def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[5] text_b = line[6] pairID = line[7][2:] if line[7].startswith("ex") else line[7] label = line[-1] examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID)) return examples def hans_convert_examples_to_features( examples: List[InputExample], label_list: List[str], max_length: int, tokenizer: PreTrainedTokenizer, ): """ Loads a data file into a list of ``InputFeatures`` Args: examples: List of ``InputExamples`` containing the examples. tokenizer: Instance of a tokenizer that will tokenize the examples. max_length: Maximum example length. label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method. output_mode: String indicating the output mode. Either ``regression`` or ``classification``. Returns: A list of task-specific ``InputFeatures`` which can be fed to the model. 
""" label_map = {label: i for i, label in enumerate(label_list)} features = [] for (ex_index, example) in tqdm.tqdm(enumerate(examples), desc="convert examples to features"): if ex_index % 10000 == 0: logger.info("Writing example %d" % (ex_index)) inputs = tokenizer( example.text_a, example.text_b, add_special_tokens=True, max_length=max_length, padding="max_length", truncation=True, return_overflowing_tokens=True, ) label = label_map[example.label] if example.label in label_map else 0 pairID = int(example.pairID) features.append(InputFeatures(**inputs, label=label, pairID=pairID)) for i, example in enumerate(examples[:5]): logger.info("*** Example ***") logger.info(f"guid: {example}") logger.info(f"features: {features[i]}") return features hans_tasks_num_labels = { "hans": 3, } hans_processors = { "hans": HansProcessor, }
11,529
33.728916
118
py
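Editor's note: hans_convert_examples_to_features above leans on the tokenizer's pair-encoding call with fixed-length padding and truncation. The snippet below isolates that call on a made-up premise/hypothesis pair; the checkpoint name and max_length are arbitrary choices.

# Hedged sketch: encode a premise/hypothesis pair the way the feature builder does.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # assumed checkpoint
encoded = tokenizer(
    "The doctor near the actor danced.",  # text_a (premise), invented
    "The actor danced.",                  # text_b (hypothesis), invented
    add_special_tokens=True,
    max_length=32,
    padding="max_length",
    truncation=True,
)
print(len(encoded["input_ids"]), encoded["token_type_ids"][:12])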
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/text-generation/run_generation.py
#!/usr/bin/env python3 # coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/CTRL/Transformer-XL/XLNet) """ import argparse import logging import numpy as np import torch from transformers import ( CTRLLMHeadModel, CTRLTokenizer, GPT2LMHeadModel, GPT2Tokenizer, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, TransfoXLLMHeadModel, TransfoXLTokenizer, XLMTokenizer, XLMWithLMHeadModel, XLNetLMHeadModel, XLNetTokenizer, ) logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger = logging.getLogger(__name__) MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop MODEL_CLASSES = { "gpt2": (GPT2LMHeadModel, GPT2Tokenizer), "ctrl": (CTRLLMHeadModel, CTRLTokenizer), "openai-gpt": (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer), "xlnet": (XLNetLMHeadModel, XLNetTokenizer), "transfo-xl": (TransfoXLLMHeadModel, TransfoXLTokenizer), "xlm": (XLMWithLMHeadModel, XLMTokenizer), } # Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia # in https://github.com/rusiaaman/XLNet-gen#methodology # and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e PADDING_TEXT = """In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos>""" def set_seed(args): np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) # # Functions to prepare models' input # def prepare_ctrl_input(args, _, tokenizer, prompt_text): if args.temperature > 0.7: logger.info("CTRL typically works better with lower temperatures (and lower top_k).") encoded_prompt = tokenizer.encode(prompt_text, add_special_tokens=False) if not any(encoded_prompt[0] == x for x in tokenizer.control_codes.values()): logger.info("WARNING! 
You are not starting your generation from a control code so you won't get good results") return prompt_text def prepare_xlm_input(args, model, tokenizer, prompt_text): # kwargs = {"language": None, "mask_token_id": None} # Set the language use_lang_emb = hasattr(model.config, "use_lang_emb") and model.config.use_lang_emb if hasattr(model.config, "lang2id") and use_lang_emb: available_languages = model.config.lang2id.keys() if args.xlm_language in available_languages: language = args.xlm_language else: language = None while language not in available_languages: language = input("Using XLM. Select language in " + str(list(available_languages)) + " >>> ") model.config.lang_id = model.config.lang2id[language] # kwargs["language"] = tokenizer.lang2id[language] # TODO fix mask_token_id setup when configurations will be synchronized between models and tokenizers # XLM masked-language modeling (MLM) models need masked token # is_xlm_mlm = "mlm" in args.model_name_or_path # if is_xlm_mlm: # kwargs["mask_token_id"] = tokenizer.mask_token_id return prompt_text def prepare_xlnet_input(args, _, tokenizer, prompt_text): prompt_text = (args.padding_text if args.padding_text else PADDING_TEXT) + prompt_text return prompt_text def prepare_transfoxl_input(args, _, tokenizer, prompt_text): prompt_text = (args.padding_text if args.padding_text else PADDING_TEXT) + prompt_text return prompt_text PREPROCESSING_FUNCTIONS = { "ctrl": prepare_ctrl_input, "xlm": prepare_xlm_input, "xlnet": prepare_xlnet_input, "transfo-xl": prepare_transfoxl_input, } def adjust_length_to_model(length, max_sequence_length): if length < 0 and max_sequence_length > 0: length = max_sequence_length elif 0 < max_sequence_length < length: length = max_sequence_length # No generation bigger than model size elif length < 0: length = MAX_LENGTH # avoid infinite loop return length def main(): parser = argparse.ArgumentParser() parser.add_argument( "--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()), ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(MODEL_CLASSES.keys()), ) parser.add_argument("--prompt", type=str, default="") parser.add_argument("--length", type=int, default=20) parser.add_argument("--stop_token", type=str, default=None, help="Token at which text generation is stopped") parser.add_argument( "--temperature", type=float, default=1.0, help="temperature of 1.0 has no effect, lower tend toward greedy sampling", ) parser.add_argument( "--repetition_penalty", type=float, default=1.0, help="primarily useful for CTRL model; in that case, use 1.2" ) parser.add_argument("--k", type=int, default=0) parser.add_argument("--p", type=float, default=0.9) parser.add_argument("--padding_text", type=str, default="", help="Padding text for Transfo-XL and XLNet.") parser.add_argument("--xlm_language", type=str, default="", help="Optional language when used with the XLM model.") parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available") parser.add_argument("--num_return_sequences", type=int, default=1, help="The number of samples to generate.") args = parser.parse_args() args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() 
set_seed(args) # Initialize the model and tokenizer try: args.model_type = args.model_type.lower() model_class, tokenizer_class = MODEL_CLASSES[args.model_type] except KeyError: raise KeyError("the model {} you specified is not supported. You are welcome to add it and open a PR :)") tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path) model = model_class.from_pretrained(args.model_name_or_path) model.to(args.device) args.length = adjust_length_to_model(args.length, max_sequence_length=model.config.max_position_embeddings) logger.info(args) prompt_text = args.prompt if args.prompt else input("Model prompt >>> ") # Different models need different input formatting and/or extra arguments requires_preprocessing = args.model_type in PREPROCESSING_FUNCTIONS.keys() if requires_preprocessing: prepare_input = PREPROCESSING_FUNCTIONS.get(args.model_type) preprocessed_prompt_text = prepare_input(args, model, tokenizer, prompt_text) if model.__class__.__name__ in ["TransfoXLLMHeadModel"]: tokenizer_kwargs = {"add_space_before_punct_symbol": True} else: tokenizer_kwargs = {} encoded_prompt = tokenizer.encode( preprocessed_prompt_text, add_special_tokens=False, return_tensors="pt", **tokenizer_kwargs ) else: encoded_prompt = tokenizer.encode(prompt_text, add_special_tokens=False, return_tensors="pt") encoded_prompt = encoded_prompt.to(args.device) if encoded_prompt.size()[-1] == 0: input_ids = None else: input_ids = encoded_prompt output_sequences = model.generate( input_ids=input_ids, max_length=args.length + len(encoded_prompt[0]), temperature=args.temperature, top_k=args.k, top_p=args.p, repetition_penalty=args.repetition_penalty, do_sample=True, num_return_sequences=args.num_return_sequences, ) # Remove the batch dimension when returning multiple sequences if len(output_sequences.shape) > 2: output_sequences.squeeze_() generated_sequences = [] for generated_sequence_idx, generated_sequence in enumerate(output_sequences): print("=== GENERATED SEQUENCE {} ===".format(generated_sequence_idx + 1)) generated_sequence = generated_sequence.tolist() # Decode text text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True) # Remove all text after the stop token text = text[: text.find(args.stop_token) if args.stop_token else None] # Add the prompt at the beginning of the sequence. Remove the excess text that was used for pre-processing total_sequence = ( prompt_text + text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)) :] ) generated_sequences.append(total_sequence) print(total_sequence) return generated_sequences if __name__ == "__main__": main()
10,422
36.901818
119
py
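The script above is driven entirely by argparse; below is a hedged, self-contained sketch of the same sampling loop it performs for a GPT-2 checkpoint. The model name, prompt, and sampling settings are illustrative choices, not values taken from the listing.

# Illustrative sketch mirroring run_generation.py for a "gpt2" checkpoint; the prompt
# and sampling parameters below are arbitrary placeholders, not project defaults.
from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")
model.eval()

encoded_prompt = tokenizer.encode("Once upon a time", add_special_tokens=False, return_tensors="pt")
output_sequences = model.generate(
    input_ids=encoded_prompt,
    max_length=20 + encoded_prompt.shape[-1],
    temperature=1.0,
    top_k=0,
    top_p=0.9,
    repetition_penalty=1.0,
    do_sample=True,
    num_return_sequences=1,
)
for sequence in output_sequences:
    print(tokenizer.decode(sequence.tolist(), clean_up_tokenization_spaces=True))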
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/text-generation/pplm/run_pplm.py
#! /usr/bin/env python3 # coding=utf-8 # Copyright (c) 2019 Uber Technologies, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Example command with bag of words: python run_pplm.py -B space --cond_text "The president" --length 100 --gamma 1.5 --num_iterations 3 --num_samples 10 --stepsize 0.01 --window_length 5 --kl_scale 0.01 --gm_scale 0.95 Example command with discriminator: python run_pplm.py -D sentiment --class_label 3 --cond_text "The lake" --length 10 --gamma 1.0 --num_iterations 30 --num_samples 10 --stepsize 0.01 --kl_scale 0.01 --gm_scale 0.95 """ import argparse import json from operator import add from typing import List, Optional, Tuple, Union import numpy as np import torch import torch.nn.functional as F from tqdm import trange from pplm_classification_head import ClassificationHead from transformers import GPT2Tokenizer from transformers.file_utils import cached_path from transformers.modeling_gpt2 import GPT2LMHeadModel PPLM_BOW = 1 PPLM_DISCRIM = 2 PPLM_BOW_DISCRIM = 3 SMALL_CONST = 1e-15 BIG_CONST = 1e10 BAG_OF_WORDS_ARCHIVE_MAP = { "legal": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/legal.txt", "military": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/military.txt", "politics": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/politics.txt", "religion": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/religion.txt", "science": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/science.txt", "space": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/space.txt", "technology": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/technology.txt", } DISCRIMINATOR_MODELS_PARAMS = { "clickbait": { "url": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/discriminators/clickbait_classifier_head.pt", "class_size": 2, "embed_size": 1024, "class_vocab": {"non_clickbait": 0, "clickbait": 1}, "default_class": 1, "pretrained_model": "gpt2-medium", }, "sentiment": { "url": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/discriminators/SST_classifier_head.pt", "class_size": 5, "embed_size": 1024, "class_vocab": {"very_positive": 2, "very_negative": 3}, "default_class": 3, "pretrained_model": "gpt2-medium", }, } def top_k_filter(logits, k, probs=False): """ Masks everything but the k top entries as -infinity (1e10). Used to mask logits such that e^-infinity -> 0 won't contribute to the sum of the denominator. 
""" if k == 0: return logits else: values = torch.topk(logits, k)[0] batch_mins = values[:, -1].view(-1, 1).expand_as(logits) if probs: return torch.where(logits < batch_mins, torch.ones_like(logits) * 0.0, logits) return torch.where(logits < batch_mins, torch.ones_like(logits) * -BIG_CONST, logits) def perturb_past( past, model, last, unpert_past=None, unpert_logits=None, accumulated_hidden=None, grad_norms=None, stepsize=0.01, one_hot_bows_vectors=None, classifier=None, class_label=None, loss_type=0, num_iterations=3, horizon_length=1, window_length=0, decay=False, gamma=1.5, kl_scale=0.01, device="cuda", ): # Generate inital perturbed past grad_accumulator = [(np.zeros(p.shape).astype("float32")) for p in past] if accumulated_hidden is None: accumulated_hidden = 0 if decay: decay_mask = torch.arange(0.0, 1.0 + SMALL_CONST, 1.0 / (window_length))[1:] else: decay_mask = 1.0 # TODO fix this comment (SUMANTH) # Generate a mask is gradient perturbated is based on a past window _, _, _, curr_length, _ = past[0].shape if curr_length > window_length and window_length > 0: ones_key_val_shape = tuple(past[0].shape[:-2]) + tuple([window_length]) + tuple(past[0].shape[-1:]) zeros_key_val_shape = ( tuple(past[0].shape[:-2]) + tuple([curr_length - window_length]) + tuple(past[0].shape[-1:]) ) ones_mask = torch.ones(ones_key_val_shape) ones_mask = decay_mask * ones_mask.permute(0, 1, 2, 4, 3) ones_mask = ones_mask.permute(0, 1, 2, 4, 3) window_mask = torch.cat((ones_mask, torch.zeros(zeros_key_val_shape)), dim=-2).to(device) else: window_mask = torch.ones_like(past[0]).to(device) # accumulate perturbations for num_iterations loss_per_iter = [] new_accumulated_hidden = None for i in range(num_iterations): print("Iteration ", i + 1) curr_perturbation = [torch.from_numpy(p_).requires_grad_(True).to(device=device) for p_ in grad_accumulator] # make sure p_.grad is not None for p_ in curr_perturbation: p_.retain_grad() # Compute hidden using perturbed past perturbed_past = list(map(add, past, curr_perturbation)) _, _, _, curr_length, _ = curr_perturbation[0].shape all_logits, _, all_hidden = model(last, past=perturbed_past) hidden = all_hidden[-1] new_accumulated_hidden = accumulated_hidden + torch.sum(hidden, dim=1).detach() # TODO: Check the layer-norm consistency of this with trained discriminator (Sumanth) logits = all_logits[:, -1, :] probs = F.softmax(logits, dim=-1) loss = 0.0 loss_list = [] if loss_type == PPLM_BOW or loss_type == PPLM_BOW_DISCRIM: for one_hot_bow in one_hot_bows_vectors: bow_logits = torch.mm(probs, torch.t(one_hot_bow)) bow_loss = -torch.log(torch.sum(bow_logits)) loss += bow_loss loss_list.append(bow_loss) print(" pplm_bow_loss:", loss.data.cpu().numpy()) if loss_type == 2 or loss_type == 3: ce_loss = torch.nn.CrossEntropyLoss() # TODO why we need to do this assignment and not just using unpert_past? 
(Sumanth) curr_unpert_past = unpert_past curr_probs = torch.unsqueeze(probs, dim=1) wte = model.resize_token_embeddings() for _ in range(horizon_length): inputs_embeds = torch.matmul(curr_probs, wte.weight.data) _, curr_unpert_past, curr_all_hidden = model(past=curr_unpert_past, inputs_embeds=inputs_embeds) curr_hidden = curr_all_hidden[-1] new_accumulated_hidden = new_accumulated_hidden + torch.sum(curr_hidden, dim=1) prediction = classifier(new_accumulated_hidden / (curr_length + 1 + horizon_length)) label = torch.tensor(prediction.shape[0] * [class_label], device=device, dtype=torch.long) discrim_loss = ce_loss(prediction, label) print(" pplm_discrim_loss:", discrim_loss.data.cpu().numpy()) loss += discrim_loss loss_list.append(discrim_loss) kl_loss = 0.0 if kl_scale > 0.0: unpert_probs = F.softmax(unpert_logits[:, -1, :], dim=-1) unpert_probs = unpert_probs + SMALL_CONST * (unpert_probs <= SMALL_CONST).float().to(device).detach() correction = SMALL_CONST * (probs <= SMALL_CONST).float().to(device).detach() corrected_probs = probs + correction.detach() kl_loss = kl_scale * ((corrected_probs * (corrected_probs / unpert_probs).log()).sum()) print(" kl_loss", kl_loss.data.cpu().numpy()) loss += kl_loss loss_per_iter.append(loss.data.cpu().numpy()) print(" pplm_loss", (loss - kl_loss).data.cpu().numpy()) # compute gradients loss.backward() # calculate gradient norms if grad_norms is not None and loss_type == PPLM_BOW: grad_norms = [ torch.max(grad_norms[index], torch.norm(p_.grad * window_mask)) for index, p_ in enumerate(curr_perturbation) ] else: grad_norms = [ (torch.norm(p_.grad * window_mask) + SMALL_CONST) for index, p_ in enumerate(curr_perturbation) ] # normalize gradients grad = [ -stepsize * (p_.grad * window_mask / grad_norms[index] ** gamma).data.cpu().numpy() for index, p_ in enumerate(curr_perturbation) ] # accumulate gradient grad_accumulator = list(map(add, grad, grad_accumulator)) # reset gradients, just to make sure for p_ in curr_perturbation: p_.grad.data.zero_() # removing past from the graph new_past = [] for p_ in past: new_past.append(p_.detach()) past = new_past # apply the accumulated perturbations to the past grad_accumulator = [torch.from_numpy(p_).requires_grad_(True).to(device=device) for p_ in grad_accumulator] pert_past = list(map(add, past, grad_accumulator)) return pert_past, new_accumulated_hidden, grad_norms, loss_per_iter def get_classifier( name: Optional[str], class_label: Union[str, int], device: str ) -> Tuple[Optional[ClassificationHead], Optional[int]]: if name is None: return None, None params = DISCRIMINATOR_MODELS_PARAMS[name] classifier = ClassificationHead(class_size=params["class_size"], embed_size=params["embed_size"]).to(device) if "url" in params: resolved_archive_file = cached_path(params["url"]) elif "path" in params: resolved_archive_file = params["path"] else: raise ValueError("Either url or path have to be specified in the discriminator model parameters") classifier.load_state_dict(torch.load(resolved_archive_file, map_location=device)) classifier.eval() if isinstance(class_label, str): if class_label in params["class_vocab"]: label_id = params["class_vocab"][class_label] else: label_id = params["default_class"] print("class_label {} not in class_vocab".format(class_label)) print("available values are: {}".format(params["class_vocab"])) print("using default class {}".format(label_id)) elif isinstance(class_label, int): if class_label in set(params["class_vocab"].values()): label_id = class_label else: label_id = 
params["default_class"] print("class_label {} not in class_vocab".format(class_label)) print("available values are: {}".format(params["class_vocab"])) print("using default class {}".format(label_id)) else: label_id = params["default_class"] return classifier, label_id def get_bag_of_words_indices(bag_of_words_ids_or_paths: List[str], tokenizer) -> List[List[List[int]]]: bow_indices = [] for id_or_path in bag_of_words_ids_or_paths: if id_or_path in BAG_OF_WORDS_ARCHIVE_MAP: filepath = cached_path(BAG_OF_WORDS_ARCHIVE_MAP[id_or_path]) else: filepath = id_or_path with open(filepath, "r") as f: words = f.read().strip().split("\n") bow_indices.append([tokenizer.encode(word.strip(), add_prefix_space=True) for word in words]) return bow_indices def build_bows_one_hot_vectors(bow_indices, tokenizer, device="cuda"): if bow_indices is None: return None one_hot_bows_vectors = [] for single_bow in bow_indices: single_bow = list(filter(lambda x: len(x) <= 1, single_bow)) single_bow = torch.tensor(single_bow).to(device) num_words = single_bow.shape[0] one_hot_bow = torch.zeros(num_words, tokenizer.vocab_size).to(device) one_hot_bow.scatter_(1, single_bow, 1) one_hot_bows_vectors.append(one_hot_bow) return one_hot_bows_vectors def full_text_generation( model, tokenizer, context=None, num_samples=1, device="cuda", bag_of_words=None, discrim=None, class_label=None, length=100, stepsize=0.02, temperature=1.0, top_k=10, sample=False, num_iterations=3, grad_length=10000, horizon_length=1, window_length=0, decay=False, gamma=1.5, gm_scale=0.9, kl_scale=0.01, repetition_penalty=1.0, **kwargs ): classifier, class_id = get_classifier(discrim, class_label, device) bow_indices = [] if bag_of_words: bow_indices = get_bag_of_words_indices(bag_of_words.split(";"), tokenizer) if bag_of_words and classifier: print("Both PPLM-BoW and PPLM-Discrim are on. 
This is not optimized.") loss_type = PPLM_BOW_DISCRIM elif bag_of_words: loss_type = PPLM_BOW print("Using PPLM-BoW") elif classifier is not None: loss_type = PPLM_DISCRIM print("Using PPLM-Discrim") else: raise Exception("Specify either a bag of words or a discriminator") unpert_gen_tok_text, _, _ = generate_text_pplm( model=model, tokenizer=tokenizer, context=context, device=device, length=length, sample=sample, perturb=False, repetition_penalty=repetition_penalty, ) if device == "cuda": torch.cuda.empty_cache() pert_gen_tok_texts = [] discrim_losses = [] losses_in_time = [] for i in range(num_samples): pert_gen_tok_text, discrim_loss, loss_in_time = generate_text_pplm( model=model, tokenizer=tokenizer, context=context, device=device, perturb=True, bow_indices=bow_indices, classifier=classifier, class_label=class_id, loss_type=loss_type, length=length, stepsize=stepsize, temperature=temperature, top_k=top_k, sample=sample, num_iterations=num_iterations, grad_length=grad_length, horizon_length=horizon_length, window_length=window_length, decay=decay, gamma=gamma, gm_scale=gm_scale, kl_scale=kl_scale, repetition_penalty=repetition_penalty, ) pert_gen_tok_texts.append(pert_gen_tok_text) if classifier is not None: discrim_losses.append(discrim_loss.data.cpu().numpy()) losses_in_time.append(loss_in_time) if device == "cuda": torch.cuda.empty_cache() return unpert_gen_tok_text, pert_gen_tok_texts, discrim_losses, losses_in_time def generate_text_pplm( model, tokenizer, context=None, past=None, device="cuda", perturb=True, bow_indices=None, classifier=None, class_label=None, loss_type=0, length=100, stepsize=0.02, temperature=1.0, top_k=10, sample=False, num_iterations=3, grad_length=10000, horizon_length=1, window_length=0, decay=False, gamma=1.5, gm_scale=0.9, kl_scale=0.01, repetition_penalty=1.0, ): output_so_far = None if context: context_t = torch.tensor(context, device=device, dtype=torch.long) while len(context_t.shape) < 2: context_t = context_t.unsqueeze(0) output_so_far = context_t # collect one hot vectors for bags of words one_hot_bows_vectors = build_bows_one_hot_vectors(bow_indices, tokenizer, device) grad_norms = None last = None unpert_discrim_loss = 0 loss_in_time = [] for i in trange(length, ascii=True): # Get past/probs for current output, except for last word # Note that GPT takes 2 inputs: past + current_token # run model forward to obtain unperturbed if past is None and output_so_far is not None: last = output_so_far[:, -1:] if output_so_far.shape[1] > 1: _, past, _ = model(output_so_far[:, :-1]) unpert_logits, unpert_past, unpert_all_hidden = model(output_so_far) unpert_last_hidden = unpert_all_hidden[-1] # check if we are abowe grad max length if i >= grad_length: current_stepsize = stepsize * 0 else: current_stepsize = stepsize # modify the past if necessary if not perturb or num_iterations == 0: pert_past = past else: accumulated_hidden = unpert_last_hidden[:, :-1, :] accumulated_hidden = torch.sum(accumulated_hidden, dim=1) if past is not None: pert_past, _, grad_norms, loss_this_iter = perturb_past( past, model, last, unpert_past=unpert_past, unpert_logits=unpert_logits, accumulated_hidden=accumulated_hidden, grad_norms=grad_norms, stepsize=current_stepsize, one_hot_bows_vectors=one_hot_bows_vectors, classifier=classifier, class_label=class_label, loss_type=loss_type, num_iterations=num_iterations, horizon_length=horizon_length, window_length=window_length, decay=decay, gamma=gamma, kl_scale=kl_scale, device=device, ) loss_in_time.append(loss_this_iter) else: 
pert_past = past pert_logits, past, pert_all_hidden = model(last, past=pert_past) pert_logits = pert_logits[:, -1, :] / temperature # + SMALL_CONST for token_idx in set(output_so_far[0].tolist()): if pert_logits[0, token_idx] < 0: pert_logits[0, token_idx] *= repetition_penalty else: pert_logits[0, token_idx] /= repetition_penalty pert_probs = F.softmax(pert_logits, dim=-1) if classifier is not None: ce_loss = torch.nn.CrossEntropyLoss() prediction = classifier(torch.mean(unpert_last_hidden, dim=1)) label = torch.tensor([class_label], device=device, dtype=torch.long) unpert_discrim_loss = ce_loss(prediction, label) print("unperturbed discrim loss", unpert_discrim_loss.data.cpu().numpy()) else: unpert_discrim_loss = 0 # Fuse the modified model and original model if perturb: unpert_probs = F.softmax(unpert_logits[:, -1, :], dim=-1) pert_probs = (pert_probs ** gm_scale) * (unpert_probs ** (1 - gm_scale)) # + SMALL_CONST pert_probs = top_k_filter(pert_probs, k=top_k, probs=True) # + SMALL_CONST # rescale if torch.sum(pert_probs) <= 1: pert_probs = pert_probs / torch.sum(pert_probs) else: pert_logits = top_k_filter(pert_logits, k=top_k) # + SMALL_CONST pert_probs = F.softmax(pert_logits, dim=-1) # sample or greedy if sample: last = torch.multinomial(pert_probs, num_samples=1) else: _, last = torch.topk(pert_probs, k=1, dim=-1) # update context/output_so_far appending the new token output_so_far = last if output_so_far is None else torch.cat((output_so_far, last), dim=1) print(tokenizer.decode(output_so_far.tolist()[0])) return output_so_far, unpert_discrim_loss, loss_in_time def set_generic_model_params(discrim_weights, discrim_meta): if discrim_weights is None: raise ValueError("When using a generic discriminator, discrim_weights need to be specified") if discrim_meta is None: raise ValueError("When using a generic discriminator, discrim_meta need to be specified") with open(discrim_meta, "r") as discrim_meta_file: meta = json.load(discrim_meta_file) meta["path"] = discrim_weights DISCRIMINATOR_MODELS_PARAMS["generic"] = meta def run_pplm_example( pretrained_model="gpt2-medium", cond_text="", uncond=False, num_samples=1, bag_of_words=None, discrim=None, discrim_weights=None, discrim_meta=None, class_label=-1, length=100, stepsize=0.02, temperature=1.0, top_k=10, sample=False, num_iterations=3, grad_length=10000, horizon_length=1, window_length=0, decay=False, gamma=1.5, gm_scale=0.9, kl_scale=0.01, seed=0, no_cuda=False, colorama=False, repetition_penalty=1.0, ): # set Random seed torch.manual_seed(seed) np.random.seed(seed) # set the device device = "cuda" if torch.cuda.is_available() and not no_cuda else "cpu" if discrim == "generic": set_generic_model_params(discrim_weights, discrim_meta) if discrim is not None: pretrained_model = DISCRIMINATOR_MODELS_PARAMS[discrim]["pretrained_model"] print("discrim = {}, pretrained_model set to discriminator's = {}".format(discrim, pretrained_model)) # load pretrained model model = GPT2LMHeadModel.from_pretrained(pretrained_model, output_hidden_states=True) model.to(device) model.eval() # load tokenizer tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model) # Freeze GPT-2 weights for param in model.parameters(): param.requires_grad = False # figure out conditioning text if uncond: tokenized_cond_text = tokenizer.encode([tokenizer.bos_token]) else: raw_text = cond_text while not raw_text: print("Did you forget to add `--cond_text`? 
") raw_text = input("Model prompt >>> ") tokenized_cond_text = tokenizer.encode(tokenizer.bos_token + raw_text) print("= Prefix of sentence =") print(tokenizer.decode(tokenized_cond_text)) print() # generate unperturbed and perturbed texts # full_text_generation returns: # unpert_gen_tok_text, pert_gen_tok_texts, discrim_losses, losses_in_time unpert_gen_tok_text, pert_gen_tok_texts, _, _ = full_text_generation( model=model, tokenizer=tokenizer, context=tokenized_cond_text, device=device, num_samples=num_samples, bag_of_words=bag_of_words, discrim=discrim, class_label=class_label, length=length, stepsize=stepsize, temperature=temperature, top_k=top_k, sample=sample, num_iterations=num_iterations, grad_length=grad_length, horizon_length=horizon_length, window_length=window_length, decay=decay, gamma=gamma, gm_scale=gm_scale, kl_scale=kl_scale, repetition_penalty=repetition_penalty, ) # untokenize unperturbed text unpert_gen_text = tokenizer.decode(unpert_gen_tok_text.tolist()[0]) print("=" * 80) print("= Unperturbed generated text =") print(unpert_gen_text) print() generated_texts = [] bow_word_ids = set() if bag_of_words and colorama: bow_indices = get_bag_of_words_indices(bag_of_words.split(";"), tokenizer) for single_bow_list in bow_indices: # filtering all words in the list composed of more than 1 token filtered = list(filter(lambda x: len(x) <= 1, single_bow_list)) # w[0] because we are sure w has only 1 item because previous fitler bow_word_ids.update(w[0] for w in filtered) # iterate through the perturbed texts for i, pert_gen_tok_text in enumerate(pert_gen_tok_texts): try: # untokenize unperturbed text if colorama: import colorama pert_gen_text = "" for word_id in pert_gen_tok_text.tolist()[0]: if word_id in bow_word_ids: pert_gen_text += "{}{}{}".format( colorama.Fore.RED, tokenizer.decode([word_id]), colorama.Style.RESET_ALL, ) else: pert_gen_text += tokenizer.decode([word_id]) else: pert_gen_text = tokenizer.decode(pert_gen_tok_text.tolist()[0]) print("= Perturbed generated text {} =".format(i + 1)) print(pert_gen_text) print() except Exception as exc: print("Ignoring error while generating perturbed text:", exc) # keep the prefix, perturbed seq, original seq for each index generated_texts.append((tokenized_cond_text, pert_gen_tok_text, unpert_gen_tok_text)) return if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--pretrained_model", "-M", type=str, default="gpt2-medium", help="pretrained model name or path to local checkpoint", ) parser.add_argument("--cond_text", type=str, default="The lake", help="Prefix texts to condition on") parser.add_argument("--uncond", action="store_true", help="Generate from end-of-text as prefix") parser.add_argument( "--num_samples", type=int, default=1, help="Number of samples to generate from the modified latents", ) parser.add_argument( "--bag_of_words", "-B", type=str, default=None, help=( "Bags of words used for PPLM-BoW. " "Either a BOW id (see list in code) or a filepath. 
" "Multiple BoWs separated by ;" ), ) parser.add_argument( "--discrim", "-D", type=str, default=None, choices=("clickbait", "sentiment", "toxicity", "generic"), help="Discriminator to use", ) parser.add_argument( "--discrim_weights", type=str, default=None, help="Weights for the generic discriminator", ) parser.add_argument( "--discrim_meta", type=str, default=None, help="Meta information for the generic discriminator", ) parser.add_argument( "--class_label", type=int, default=-1, help="Class label used for the discriminator", ) parser.add_argument("--length", type=int, default=100) parser.add_argument("--stepsize", type=float, default=0.02) parser.add_argument("--temperature", type=float, default=1.0) parser.add_argument("--top_k", type=int, default=10) parser.add_argument("--sample", action="store_true", help="Generate from end-of-text as prefix") parser.add_argument("--num_iterations", type=int, default=3) parser.add_argument("--grad_length", type=int, default=10000) parser.add_argument( "--window_length", type=int, default=0, help="Length of past which is being optimized; 0 corresponds to infinite window length", ) parser.add_argument( "--horizon_length", type=int, default=1, help="Length of future to optimize over", ) parser.add_argument("--decay", action="store_true", help="whether to decay or not") parser.add_argument("--gamma", type=float, default=1.5) parser.add_argument("--gm_scale", type=float, default=0.9) parser.add_argument("--kl_scale", type=float, default=0.01) parser.add_argument("--seed", type=int, default=0) parser.add_argument("--no_cuda", action="store_true", help="no cuda") parser.add_argument("--colorama", action="store_true", help="colors keywords") parser.add_argument( "--repetition_penalty", type=float, default=1.0, help="Penalize repetition. More than 1.0 -> less repetition", ) args = parser.parse_args() run_pplm_example(**vars(args))
28,106
34.533502
182
py
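Besides the CLI entry point, run_pplm_example can be called directly. A minimal sketch reproducing the bag-of-words example command from the module docstring, assuming run_pplm.py is importable from the working directory:

# Programmatic equivalent of the "-B space" example command in the docstring.
from run_pplm import run_pplm_example

run_pplm_example(
    bag_of_words="space",
    cond_text="The president",
    length=100,
    gamma=1.5,
    num_iterations=3,
    num_samples=10,
    stepsize=0.01,
    window_length=5,
    kl_scale=0.01,
    gm_scale=0.95,
)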
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/text-generation/pplm/run_pplm_discrim_train.py
#! /usr/bin/env python3 # coding=utf-8 # Copyright (c) 2019 Uber Technologies, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import csv import json import math import time import numpy as np import torch import torch.nn.functional as F import torch.optim as optim import torch.utils.data as data from nltk.tokenize.treebank import TreebankWordDetokenizer from torchtext import data as torchtext_data from torchtext import datasets from tqdm import tqdm, trange from pplm_classification_head import ClassificationHead from transformers import GPT2LMHeadModel, GPT2Tokenizer torch.manual_seed(0) np.random.seed(0) EPSILON = 1e-10 example_sentence = "This is incredible! I love it, this is the best chicken I have ever had." max_length_seq = 100 class Discriminator(torch.nn.Module): """Transformer encoder followed by a Classification Head""" def __init__(self, class_size, pretrained_model="gpt2-medium", cached_mode=False, device="cpu"): super().__init__() self.tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model) self.encoder = GPT2LMHeadModel.from_pretrained(pretrained_model) self.embed_size = self.encoder.transformer.config.hidden_size self.classifier_head = ClassificationHead(class_size=class_size, embed_size=self.embed_size) self.cached_mode = cached_mode self.device = device def get_classifier(self): return self.classifier_head def train_custom(self): for param in self.encoder.parameters(): param.requires_grad = False self.classifier_head.train() def avg_representation(self, x): mask = x.ne(0).unsqueeze(2).repeat(1, 1, self.embed_size).float().to(self.device).detach() hidden, _ = self.encoder.transformer(x) masked_hidden = hidden * mask avg_hidden = torch.sum(masked_hidden, dim=1) / (torch.sum(mask, dim=1).detach() + EPSILON) return avg_hidden def forward(self, x): if self.cached_mode: avg_hidden = x.to(self.device) else: avg_hidden = self.avg_representation(x.to(self.device)) logits = self.classifier_head(avg_hidden) probs = F.log_softmax(logits, dim=-1) return probs class Dataset(data.Dataset): def __init__(self, X, y): """Reads source and target sequences from txt files.""" self.X = X self.y = y def __len__(self): return len(self.X) def __getitem__(self, index): """Returns one data pair (source and target).""" data = {} data["X"] = self.X[index] data["y"] = self.y[index] return data def collate_fn(data): def pad_sequences(sequences): lengths = [len(seq) for seq in sequences] padded_sequences = torch.zeros(len(sequences), max(lengths)).long() # padding value = 0 for i, seq in enumerate(sequences): end = lengths[i] padded_sequences[i, :end] = seq[:end] return padded_sequences, lengths item_info = {} for key in data[0].keys(): item_info[key] = [d[key] for d in data] x_batch, _ = pad_sequences(item_info["X"]) y_batch = torch.tensor(item_info["y"], dtype=torch.long) return x_batch, y_batch def cached_collate_fn(data): item_info = {} for key in data[0].keys(): item_info[key] = [d[key] for d in data] x_batch = torch.cat(item_info["X"], 0) y_batch = torch.tensor(item_info["y"], 
dtype=torch.long) return x_batch, y_batch def train_epoch(data_loader, discriminator, optimizer, epoch=0, log_interval=10, device="cpu"): samples_so_far = 0 discriminator.train_custom() for batch_idx, (input_t, target_t) in enumerate(data_loader): input_t, target_t = input_t.to(device), target_t.to(device) optimizer.zero_grad() output_t = discriminator(input_t) loss = F.nll_loss(output_t, target_t) loss.backward(retain_graph=True) optimizer.step() samples_so_far += len(input_t) if batch_idx % log_interval == 0: print( "Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format( epoch + 1, samples_so_far, len(data_loader.dataset), 100 * samples_so_far / len(data_loader.dataset), loss.item(), ) ) def evaluate_performance(data_loader, discriminator, device="cpu"): discriminator.eval() test_loss = 0 correct = 0 with torch.no_grad(): for input_t, target_t in data_loader: input_t, target_t = input_t.to(device), target_t.to(device) output_t = discriminator(input_t) # sum up batch loss test_loss += F.nll_loss(output_t, target_t, reduction="sum").item() # get the index of the max log-probability pred_t = output_t.argmax(dim=1, keepdim=True) correct += pred_t.eq(target_t.view_as(pred_t)).sum().item() test_loss /= len(data_loader.dataset) print( "Performance on test set: " "Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)".format( test_loss, correct, len(data_loader.dataset), 100.0 * correct / len(data_loader.dataset) ) ) def predict(input_sentence, model, classes, cached=False, device="cpu"): input_t = model.tokenizer.encode(input_sentence) input_t = torch.tensor([input_t], dtype=torch.long, device=device) if cached: input_t = model.avg_representation(input_t) log_probs = model(input_t).data.cpu().numpy().flatten().tolist() print("Input sentence:", input_sentence) print( "Predictions:", ", ".join("{}: {:.4f}".format(c, math.exp(log_prob)) for c, log_prob in zip(classes, log_probs)), ) def get_cached_data_loader(dataset, batch_size, discriminator, shuffle=False, device="cpu"): data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, collate_fn=collate_fn) xs = [] ys = [] for batch_idx, (x, y) in enumerate(tqdm(data_loader, ascii=True)): with torch.no_grad(): x = x.to(device) avg_rep = discriminator.avg_representation(x).cpu().detach() avg_rep_list = torch.unbind(avg_rep.unsqueeze(1)) xs += avg_rep_list ys += y.cpu().numpy().tolist() data_loader = torch.utils.data.DataLoader( dataset=Dataset(xs, ys), batch_size=batch_size, shuffle=shuffle, collate_fn=cached_collate_fn ) return data_loader def train_discriminator( dataset, dataset_fp=None, pretrained_model="gpt2-medium", epochs=10, batch_size=64, log_interval=10, save_model=False, cached=False, no_cuda=False, ): device = "cuda" if torch.cuda.is_available() and not no_cuda else "cpu" print("Preprocessing {} dataset...".format(dataset)) start = time.time() if dataset == "SST": idx2class = ["positive", "negative", "very positive", "very negative", "neutral"] class2idx = {c: i for i, c in enumerate(idx2class)} discriminator = Discriminator( class_size=len(idx2class), pretrained_model=pretrained_model, cached_mode=cached, device=device ).to(device) text = torchtext_data.Field() label = torchtext_data.Field(sequential=False) train_data, val_data, test_data = datasets.SST.splits(text, label, fine_grained=True, train_subtrees=True,) x = [] y = [] for i in trange(len(train_data), ascii=True): seq = TreebankWordDetokenizer().detokenize(vars(train_data[i])["text"]) seq = discriminator.tokenizer.encode(seq) seq = torch.tensor([50256] + seq, 
device=device, dtype=torch.long) x.append(seq) y.append(class2idx[vars(train_data[i])["label"]]) train_dataset = Dataset(x, y) test_x = [] test_y = [] for i in trange(len(test_data), ascii=True): seq = TreebankWordDetokenizer().detokenize(vars(test_data[i])["text"]) seq = discriminator.tokenizer.encode(seq) seq = torch.tensor([50256] + seq, device=device, dtype=torch.long) test_x.append(seq) test_y.append(class2idx[vars(test_data[i])["label"]]) test_dataset = Dataset(test_x, test_y) discriminator_meta = { "class_size": len(idx2class), "embed_size": discriminator.embed_size, "pretrained_model": pretrained_model, "class_vocab": class2idx, "default_class": 2, } elif dataset == "clickbait": idx2class = ["non_clickbait", "clickbait"] class2idx = {c: i for i, c in enumerate(idx2class)} discriminator = Discriminator( class_size=len(idx2class), pretrained_model=pretrained_model, cached_mode=cached, device=device ).to(device) with open("datasets/clickbait/clickbait_train_prefix.txt") as f: data = [] for i, line in enumerate(f): try: data.append(eval(line)) except Exception: print("Error evaluating line {}: {}".format(i, line)) continue x = [] y = [] with open("datasets/clickbait/clickbait_train_prefix.txt") as f: for i, line in enumerate(tqdm(f, ascii=True)): try: d = eval(line) seq = discriminator.tokenizer.encode(d["text"]) if len(seq) < max_length_seq: seq = torch.tensor([50256] + seq, device=device, dtype=torch.long) else: print("Line {} is longer than maximum length {}".format(i, max_length_seq)) continue x.append(seq) y.append(d["label"]) except Exception: print("Error evaluating / tokenizing" " line {}, skipping it".format(i)) pass full_dataset = Dataset(x, y) train_size = int(0.9 * len(full_dataset)) test_size = len(full_dataset) - train_size train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size]) discriminator_meta = { "class_size": len(idx2class), "embed_size": discriminator.embed_size, "pretrained_model": pretrained_model, "class_vocab": class2idx, "default_class": 1, } elif dataset == "toxic": idx2class = ["non_toxic", "toxic"] class2idx = {c: i for i, c in enumerate(idx2class)} discriminator = Discriminator( class_size=len(idx2class), pretrained_model=pretrained_model, cached_mode=cached, device=device ).to(device) x = [] y = [] with open("datasets/toxic/toxic_train.txt") as f: for i, line in enumerate(tqdm(f, ascii=True)): try: d = eval(line) seq = discriminator.tokenizer.encode(d["text"]) if len(seq) < max_length_seq: seq = torch.tensor([50256] + seq, device=device, dtype=torch.long) else: print("Line {} is longer than maximum length {}".format(i, max_length_seq)) continue x.append(seq) y.append(int(np.sum(d["label"]) > 0)) except Exception: print("Error evaluating / tokenizing" " line {}, skipping it".format(i)) pass full_dataset = Dataset(x, y) train_size = int(0.9 * len(full_dataset)) test_size = len(full_dataset) - train_size train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size]) discriminator_meta = { "class_size": len(idx2class), "embed_size": discriminator.embed_size, "pretrained_model": pretrained_model, "class_vocab": class2idx, "default_class": 0, } else: # if dataset == "generic": # This assumes the input dataset is a TSV with the following structure: # class \t text if dataset_fp is None: raise ValueError("When generic dataset is selected, " "dataset_fp needs to be specified aswell.") classes = set() with open(dataset_fp) as f: csv_reader = csv.reader(f, delimiter="\t") for row in 
tqdm(csv_reader, ascii=True): if row: classes.add(row[0]) idx2class = sorted(classes) class2idx = {c: i for i, c in enumerate(idx2class)} discriminator = Discriminator( class_size=len(idx2class), pretrained_model=pretrained_model, cached_mode=cached, device=device ).to(device) x = [] y = [] with open(dataset_fp) as f: csv_reader = csv.reader(f, delimiter="\t") for i, row in enumerate(tqdm(csv_reader, ascii=True)): if row: label = row[0] text = row[1] try: seq = discriminator.tokenizer.encode(text) if len(seq) < max_length_seq: seq = torch.tensor([50256] + seq, device=device, dtype=torch.long) else: print("Line {} is longer than maximum length {}".format(i, max_length_seq)) continue x.append(seq) y.append(class2idx[label]) except Exception: print("Error tokenizing line {}, skipping it".format(i)) pass full_dataset = Dataset(x, y) train_size = int(0.9 * len(full_dataset)) test_size = len(full_dataset) - train_size train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size]) discriminator_meta = { "class_size": len(idx2class), "embed_size": discriminator.embed_size, "pretrained_model": pretrained_model, "class_vocab": class2idx, "default_class": 0, } end = time.time() print("Preprocessed {} data points".format(len(train_dataset) + len(test_dataset))) print("Data preprocessing took: {:.3f}s".format(end - start)) if cached: print("Building representation cache...") start = time.time() train_loader = get_cached_data_loader(train_dataset, batch_size, discriminator, shuffle=True, device=device) test_loader = get_cached_data_loader(test_dataset, batch_size, discriminator, device=device) end = time.time() print("Building representation cache took: {:.3f}s".format(end - start)) else: train_loader = torch.utils.data.DataLoader( dataset=train_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn ) test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, collate_fn=collate_fn) if save_model: with open("{}_classifier_head_meta.json".format(dataset), "w") as meta_file: json.dump(discriminator_meta, meta_file) optimizer = optim.Adam(discriminator.parameters(), lr=0.0001) for epoch in range(epochs): start = time.time() print("\nEpoch", epoch + 1) train_epoch( discriminator=discriminator, data_loader=train_loader, optimizer=optimizer, epoch=epoch, log_interval=log_interval, device=device, ) evaluate_performance(data_loader=test_loader, discriminator=discriminator, device=device) end = time.time() print("Epoch took: {:.3f}s".format(end - start)) print("\nExample prediction") predict(example_sentence, discriminator, idx2class, cached=cached, device=device) if save_model: # torch.save(discriminator.state_dict(), # "{}_discriminator_{}.pt".format( # args.dataset, epoch + 1 # )) torch.save( discriminator.get_classifier().state_dict(), "{}_classifier_head_epoch_{}.pt".format(dataset, epoch + 1), ) if __name__ == "__main__": parser = argparse.ArgumentParser(description="Train a discriminator on top of GPT-2 representations") parser.add_argument( "--dataset", type=str, default="SST", choices=("SST", "clickbait", "toxic", "generic"), help="dataset to train the discriminator on." "In case of generic, the dataset is expected" "to be a TSBV file with structure: class \\t text", ) parser.add_argument( "--dataset_fp", type=str, default="", help="File path of the dataset to use. 
" "Needed only in case of generic datadset", ) parser.add_argument( "--pretrained_model", type=str, default="gpt2-medium", help="Pretrained model to use as encoder" ) parser.add_argument("--epochs", type=int, default=10, metavar="N", help="Number of training epochs") parser.add_argument( "--batch_size", type=int, default=64, metavar="N", help="input batch size for training (default: 64)" ) parser.add_argument( "--log_interval", type=int, default=10, metavar="N", help="how many batches to wait before logging training status", ) parser.add_argument("--save_model", action="store_true", help="whether to save the model") parser.add_argument("--cached", action="store_true", help="whether to cache the input representations") parser.add_argument("--no_cuda", action="store_true", help="use to turn off cuda") args = parser.parse_args() train_discriminator(**(vars(args)))
18,693
35.088803
117
py
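train_discriminator is also usable as a plain function. A hedged sketch for the generic TSV path follows; my_dataset.tsv is a hypothetical "class \t text" file, not part of the repository.

# Hedged sketch: training a generic discriminator head from a hypothetical TSV file,
# as the dataset="generic" branch above expects (one "class \t text" row per line).
from run_pplm_discrim_train import train_discriminator

train_discriminator(
    dataset="generic",
    dataset_fp="my_dataset.tsv",   # hypothetical file path
    pretrained_model="gpt2-medium",
    epochs=10,
    batch_size=64,
    cached=True,
    save_model=True,
)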
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/text-generation/pplm/pplm_classification_head.py
import torch


class ClassificationHead(torch.nn.Module):
    """Classification Head for transformer encoders"""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = torch.nn.Linear(embed_size, embed_size)
        # self.mlp2 = (torch.nn.Linear(embed_size, class_size))
        self.mlp = torch.nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = F.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
655
31.8
63
py
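A minimal usage sketch of the head on its own; the batch size is arbitrary, and 1024 matches the gpt2-medium embed_size used in the discriminator configs above.

# Standalone sketch: feeding an averaged hidden state through the classification head.
import torch
from pplm_classification_head import ClassificationHead

head = ClassificationHead(class_size=5, embed_size=1024)
avg_hidden = torch.randn(2, 1024)   # (batch, embed_size), placeholder values
logits = head(avg_hidden)           # -> tensor of shape (2, 5)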
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/multiple-choice/utils_multiple_choice.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Multiple choice fine-tuning: utilities to work with multiple choice tasks of reading comprehension """ import csv import glob import json import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional import tqdm from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available logger = logging.getLogger(__name__) @dataclass(frozen=True) class InputExample: """ A single training/test example for multiple choice Args: example_id: Unique id for the example. question: string. The untokenized text of the second sequence (question). contexts: list of str. The untokenized text of the first sequence (context of corresponding question). endings: list of str. multiple choice's options. Its length must be equal to contexts' length. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """ example_id: str question: str contexts: List[str] endings: List[str] label: Optional[str] @dataclass(frozen=True) class InputFeatures: """ A single set of features of data. Property names are the same names as the corresponding inputs to a model. """ example_id: str input_ids: List[List[int]] attention_mask: Optional[List[List[int]]] token_type_ids: Optional[List[List[int]]] label: Optional[int] class Split(Enum): train = "train" dev = "dev" test = "test" if is_torch_available(): import torch from torch.utils.data.dataset import Dataset class MultipleChoiceDataset(Dataset): """ This will be superseded by a framework-agnostic approach soon. """ features: List[InputFeatures] def __init__( self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train, ): processor = processors[task]() cached_features_file = os.path.join( data_dir, "cached_{}_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length), task,), ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
lock_path = cached_features_file + ".lock" with FileLock(lock_path): if os.path.exists(cached_features_file) and not overwrite_cache: logger.info(f"Loading features from cached file {cached_features_file}") self.features = torch.load(cached_features_file) else: logger.info(f"Creating features from dataset file at {data_dir}") label_list = processor.get_labels() if mode == Split.dev: examples = processor.get_dev_examples(data_dir) elif mode == Split.test: examples = processor.get_test_examples(data_dir) else: examples = processor.get_train_examples(data_dir) logger.info("Training examples: %s", len(examples)) self.features = convert_examples_to_features(examples, label_list, max_seq_length, tokenizer,) logger.info("Saving features into cached file %s", cached_features_file) torch.save(self.features, cached_features_file) def __len__(self): return len(self.features) def __getitem__(self, i) -> InputFeatures: return self.features[i] if is_tf_available(): import tensorflow as tf class TFMultipleChoiceDataset: """ This will be superseded by a framework-agnostic approach soon. """ features: List[InputFeatures] def __init__( self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str, max_seq_length: Optional[int] = 128, overwrite_cache=False, mode: Split = Split.train, ): processor = processors[task]() logger.info(f"Creating features from dataset file at {data_dir}") label_list = processor.get_labels() if mode == Split.dev: examples = processor.get_dev_examples(data_dir) elif mode == Split.test: examples = processor.get_test_examples(data_dir) else: examples = processor.get_train_examples(data_dir) logger.info("Training examples: %s", len(examples)) self.features = convert_examples_to_features(examples, label_list, max_seq_length, tokenizer,) def gen(): for (ex_index, ex) in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(examples))) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) self.dataset = tf.data.Dataset.from_generator( gen, ( { "example_id": tf.int32, "input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32, }, tf.int64, ), ( { "example_id": tf.TensorShape([]), "input_ids": tf.TensorShape([None, None]), "attention_mask": tf.TensorShape([None, None]), "token_type_ids": tf.TensorShape([None, None]), }, tf.TensorShape([]), ), ) def get_dataset(self): return self.dataset def __len__(self): return len(self.features) def __getitem__(self, i) -> InputFeatures: return self.features[i] class DataProcessor: """Base class for data converters for multiple choice data sets.""" def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_test_examples(self, data_dir): """Gets a collection of `InputExample`s for the test set.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() class RaceProcessor(DataProcessor): """Processor for the RACE data set.""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} train".format(data_dir)) high = os.path.join(data_dir, "train/high") middle = os.path.join(data_dir, "train/middle") high = self._read_txt(high) middle = 
self._read_txt(middle) return self._create_examples(high + middle, "train") def get_dev_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) high = os.path.join(data_dir, "dev/high") middle = os.path.join(data_dir, "dev/middle") high = self._read_txt(high) middle = self._read_txt(middle) return self._create_examples(high + middle, "dev") def get_test_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} test".format(data_dir)) high = os.path.join(data_dir, "test/high") middle = os.path.join(data_dir, "test/middle") high = self._read_txt(high) middle = self._read_txt(middle) return self._create_examples(high + middle, "test") def get_labels(self): """See base class.""" return ["0", "1", "2", "3"] def _read_txt(self, input_dir): lines = [] files = glob.glob(input_dir + "/*txt") for file in tqdm.tqdm(files, desc="read files"): with open(file, "r", encoding="utf-8") as fin: data_raw = json.load(fin) data_raw["race_id"] = file lines.append(data_raw) return lines def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (_, data_raw) in enumerate(lines): race_id = "%s-%s" % (set_type, data_raw["race_id"]) article = data_raw["article"] for i in range(len(data_raw["answers"])): truth = str(ord(data_raw["answers"][i]) - ord("A")) question = data_raw["questions"][i] options = data_raw["options"][i] examples.append( InputExample( example_id=race_id, question=question, contexts=[article, article, article, article], # this is not efficient but convenient endings=[options[0], options[1], options[2], options[3]], label=truth, ) ) return examples class SynonymProcessor(DataProcessor): """Processor for the Synonym data set.""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} train".format(data_dir)) return self._create_examples(self._read_csv(os.path.join(data_dir, "mctrain.csv")), "train") def get_dev_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) return self._create_examples(self._read_csv(os.path.join(data_dir, "mchp.csv")), "dev") def get_test_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) return self._create_examples(self._read_csv(os.path.join(data_dir, "mctest.csv")), "test") def get_labels(self): """See base class.""" return ["0", "1", "2", "3", "4"] def _read_csv(self, input_file): with open(input_file, "r", encoding="utf-8") as f: return list(csv.reader(f)) def _create_examples(self, lines: List[List[str]], type: str): """Creates examples for the training and dev sets.""" examples = [ InputExample( example_id=line[0], question="", # in the swag dataset, the # common beginning of each # choice is stored in "sent2". 
contexts=[line[1], line[1], line[1], line[1], line[1]], endings=[line[2], line[3], line[4], line[5], line[6]], label=line[7], ) for line in lines # we skip the line with the column names ] return examples class SwagProcessor(DataProcessor): """Processor for the SWAG data set.""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} train".format(data_dir)) return self._create_examples(self._read_csv(os.path.join(data_dir, "train.csv")), "train") def get_dev_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) return self._create_examples(self._read_csv(os.path.join(data_dir, "val.csv")), "dev") def get_test_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) raise ValueError( "For swag testing, the input file does not contain a label column. It can not be tested in current code" "setting!" ) return self._create_examples(self._read_csv(os.path.join(data_dir, "test.csv")), "test") def get_labels(self): """See base class.""" return ["0", "1", "2", "3"] def _read_csv(self, input_file): with open(input_file, "r", encoding="utf-8") as f: return list(csv.reader(f)) def _create_examples(self, lines: List[List[str]], type: str): """Creates examples for the training and dev sets.""" if type == "train" and lines[0][-1] != "label": raise ValueError("For training, the input file must contain a label column.") examples = [ InputExample( example_id=line[2], question=line[5], # in the swag dataset, the # common beginning of each # choice is stored in "sent2". contexts=[line[4], line[4], line[4], line[4]], endings=[line[7], line[8], line[9], line[10]], label=line[11], ) for line in lines[1:] # we skip the line with the column names ] return examples class ArcProcessor(DataProcessor): """Processor for the ARC data set (request from allennlp).""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} train".format(data_dir)) return self._create_examples(self._read_json(os.path.join(data_dir, "train.jsonl")), "train") def get_dev_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) return self._create_examples(self._read_json(os.path.join(data_dir, "dev.jsonl")), "dev") def get_test_examples(self, data_dir): logger.info("LOOKING AT {} test".format(data_dir)) return self._create_examples(self._read_json(os.path.join(data_dir, "test.jsonl")), "test") def get_labels(self): """See base class.""" return ["0", "1", "2", "3"] def _read_json(self, input_file): with open(input_file, "r", encoding="utf-8") as fin: lines = fin.readlines() return lines def _create_examples(self, lines, type): """Creates examples for the training and dev sets.""" # There are two types of labels. They should be normalized def normalize(truth): if truth in "ABCD": return ord(truth) - ord("A") elif truth in "1234": return int(truth) - 1 else: logger.info("truth ERROR! 
%s", str(truth)) return None examples = [] three_choice = 0 four_choice = 0 five_choice = 0 other_choices = 0 # we deleted example which has more than or less than four choices for line in tqdm.tqdm(lines, desc="read arc data"): data_raw = json.loads(line.strip("\n")) if len(data_raw["question"]["choices"]) == 3: three_choice += 1 continue elif len(data_raw["question"]["choices"]) == 5: five_choice += 1 continue elif len(data_raw["question"]["choices"]) != 4: other_choices += 1 continue four_choice += 1 truth = str(normalize(data_raw["answerKey"])) assert truth != "None" question_choices = data_raw["question"] question = question_choices["stem"] id = data_raw["id"] options = question_choices["choices"] if len(options) == 4: examples.append( InputExample( example_id=id, question=question, contexts=[ options[0]["para"].replace("_", ""), options[1]["para"].replace("_", ""), options[2]["para"].replace("_", ""), options[3]["para"].replace("_", ""), ], endings=[options[0]["text"], options[1]["text"], options[2]["text"], options[3]["text"]], label=truth, ) ) if type == "train": assert len(examples) > 1 assert examples[0].label is not None logger.info("len examples: %s}", str(len(examples))) logger.info("Three choices: %s", str(three_choice)) logger.info("Five choices: %s", str(five_choice)) logger.info("Other choices: %s", str(other_choices)) logger.info("four choices: %s", str(four_choice)) return examples def convert_examples_to_features( examples: List[InputExample], label_list: List[str], max_length: int, tokenizer: PreTrainedTokenizer, ) -> List[InputFeatures]: """ Loads a data file into a list of `InputFeatures` """ label_map = {label: i for i, label in enumerate(label_list)} features = [] for (ex_index, example) in tqdm.tqdm(enumerate(examples), desc="convert examples to features"): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(examples))) choices_inputs = [] for ending_idx, (context, ending) in enumerate(zip(example.contexts, example.endings)): text_a = context if example.question.find("_") != -1: # this is for cloze question text_b = example.question.replace("_", ending) else: text_b = example.question + " " + ending inputs = tokenizer( text_a, text_b, add_special_tokens=True, max_length=max_length, padding="max_length", truncation=True, return_overflowing_tokens=True, ) if "num_truncated_tokens" in inputs and inputs["num_truncated_tokens"] > 0: logger.info( "Attention! you are cropping tokens (swag task is ok). " "If you are training ARC and RACE and you are poping question + options," "you need to try to use a bigger max seq length!" ) choices_inputs.append(inputs) label = label_map[example.label] input_ids = [x["input_ids"] for x in choices_inputs] attention_mask = ( [x["attention_mask"] for x in choices_inputs] if "attention_mask" in choices_inputs[0] else None ) token_type_ids = ( [x["token_type_ids"] for x in choices_inputs] if "token_type_ids" in choices_inputs[0] else None ) features.append( InputFeatures( example_id=example.example_id, input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label, ) ) for f in features[:2]: logger.info("*** Example ***") logger.info("feature: %s" % f) return features processors = {"race": RaceProcessor, "swag": SwagProcessor, "arc": ArcProcessor, "syn": SynonymProcessor} MULTIPLE_CHOICE_TASKS_NUM_LABELS = {"race", 4, "swag", 4, "arc", 4, "syn", 5}
20,484
35.580357
116
py
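The `convert_examples_to_features` routine in the file above pairs every context with every candidate ending and splices the ending into cloze-style questions (those containing an underscore). A minimal sketch of that pairing step, with made-up question, contexts, and endings, assuming nothing beyond the logic shown in the processor code:

# Sketch (not part of the dataset): how one multiple-choice example becomes four
# (text_a, text_b) pairs before tokenization. The example strings are invented.
question = "The capital of France is _"            # cloze-style question (contains "_")
contexts = ["France is a country in Europe."] * 4  # same article repeated per choice
endings = ["Paris", "London", "Berlin", "Madrid"]

pairs = []
for context, ending in zip(contexts, endings):
    if "_" in question:
        # cloze question: splice the ending into the blank
        text_b = question.replace("_", ending)
    else:
        # ordinary question: append the ending after the question
        text_b = question + " " + ending
    pairs.append((context, text_b))

# Each pair is then tokenized with add_special_tokens=True and padding="max_length";
# the four resulting input_ids lists form a single multiple-choice feature.
for text_a, text_b in pairs:
    print(text_a, "||", text_b)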
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/movement-pruning/counts_parameters.py
# Copyright 2020-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Count remaining (non-zero) weights in the encoder (i.e. the transformer layers).
Sparsity and remaining weights levels are equivalent: sparsity % = 100 - remaining weights %.
"""
import argparse
import os

import torch

from emmental.modules import ThresholdBinarizer, TopKBinarizer


def main(args):
    serialization_dir = args.serialization_dir
    pruning_method = args.pruning_method
    threshold = args.threshold

    st = torch.load(os.path.join(serialization_dir, "pytorch_model.bin"), map_location="cpu")

    remaining_count = 0  # Number of remaining (not pruned) params in the encoder
    encoder_count = 0  # Number of params in the encoder

    print("name".ljust(60, " "), "Remaining Weights %", "Remaining Weight")
    for name, param in st.items():
        if "encoder" not in name:
            continue

        if "mask_scores" in name:
            if pruning_method == "topK":
                mask_ones = TopKBinarizer.apply(param, threshold).sum().item()
            elif pruning_method == "sigmoied_threshold":
                mask_ones = ThresholdBinarizer.apply(param, threshold, True).sum().item()
            elif pruning_method == "l0":
                l, r = -0.1, 1.1
                s = torch.sigmoid(param)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                mask_ones = (mask > 0.0).sum().item()
            else:
                raise ValueError("Unknown pruning method")
            remaining_count += mask_ones
            print(name.ljust(60, " "), str(round(100 * mask_ones / param.numel(), 3)).ljust(20, " "), str(mask_ones))
        else:
            encoder_count += param.numel()
            if "bias" in name or "LayerNorm" in name:
                remaining_count += param.numel()

    print("")
    print("Remaining Weights (global) %: ", 100 * remaining_count / encoder_count)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help="Pruning Method (l0 = L0 regularization, topK = Movement pruning, sigmoied_threshold = Soft movement pruning)",
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help="For `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
        "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
        "Not needed for `l0`",
    )
    parser.add_argument(
        "--serialization_dir",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    args = parser.parse_args()

    main(args)
3,394
35.505376
124
py
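counts_parameters.py reports, per mask_scores tensor, the share of weights the chosen binarizer keeps; sparsity % is simply 100 minus that share. A minimal sketch of the same count in plain PyTorch, where the top-K selection is only an approximation of the emmental TopKBinarizer and the tensor shape is made up:

# Sketch: estimate remaining weights % for one fake mask_scores tensor.
import torch

def topk_mask(mask_scores: torch.Tensor, threshold: float) -> torch.Tensor:
    """Keep the top `threshold` fraction of scores (approximation of TopKBinarizer)."""
    k = max(1, int(threshold * mask_scores.numel()))
    kth_value = mask_scores.flatten().kthvalue(mask_scores.numel() - k + 1).values
    return (mask_scores >= kth_value).float()

scores = torch.randn(768, 768)               # invented mask_scores for one weight matrix
mask = topk_mask(scores, threshold=0.10)     # keep roughly 10% of the weights
remaining = 100.0 * mask.sum().item() / mask.numel()
print(f"remaining weights: {remaining:.2f}%  (sparsity: {100 - remaining:.2f}%)")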
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/movement-pruning/masked_run_glue.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-pruning Masked BERT on sequence classification on GLUE.""" import argparse import glob import json import logging import os import random import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from emmental import MaskedBertConfig, MaskedBertForSequenceClassification from transformers import ( WEIGHTS_NAME, AdamW, BertConfig, BertForSequenceClassification, BertTokenizer, get_linear_schedule_with_warmup, ) from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes as output_modes from transformers import glue_processors as processors try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter logger = logging.getLogger(__name__) MODEL_CLASSES = { "bert": (BertConfig, BertForSequenceClassification, BertTokenizer), "masked_bert": (MaskedBertConfig, MaskedBertForSequenceClassification, BertTokenizer), } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def schedule_threshold( step: int, total_step: int, warmup_steps: int, initial_threshold: float, final_threshold: float, initial_warmup: int, final_warmup: int, final_lambda: float, ): if step <= initial_warmup * warmup_steps: threshold = initial_threshold elif step > (total_step - final_warmup * warmup_steps): threshold = final_threshold else: spars_warmup_steps = initial_warmup * warmup_steps spars_schedu_steps = (final_warmup + initial_warmup) * warmup_steps mul_coeff = 1 - (step - spars_warmup_steps) / (total_step - spars_schedu_steps) threshold = final_threshold + (initial_threshold - final_threshold) * (mul_coeff ** 3) regu_lambda = final_lambda * threshold / final_threshold return threshold, regu_lambda def regularization(model: nn.Module, mode: str): regu, counter = 0, 0 for name, param in model.named_parameters(): if "mask_scores" in name: if mode == "l1": regu += torch.norm(torch.sigmoid(param), p=1) / param.numel() elif mode == "l0": regu += torch.sigmoid(param - 2 / 3 * np.log(0.1 / 1.1)).sum() / param.numel() else: ValueError("Don't know this mode.") counter += 1 return regu / counter def train(args, train_dataset, model, tokenizer, teacher=None): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter(log_dir=args.output_dir) args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) 
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if "mask_score" in n and p.requires_grad], "lr": args.mask_scores_learning_rate, }, { "params": [ p for n, p in model.named_parameters() if "mask_score" not in n and p.requires_grad and not any(nd in n for nd in no_decay) ], "lr": args.learning_rate, "weight_decay": args.weight_decay, }, { "params": [ p for n, p in model.named_parameters() if "mask_score" not in n and p.requires_grad and any(nd in n for nd in no_decay) ], "lr": args.learning_rate, "weight_decay": 0.0, }, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # Check if saved optimizer or scheduler states exist if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile( os.path.join(args.model_name_or_path, "scheduler.pt") ): # Load in optimizer and scheduler states optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True, ) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. 
parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) # Distillation if teacher is not None: logger.info(" Training with distillation") global_step = 0 # Global TopK if args.global_topk: threshold_mem = None epochs_trained = 0 steps_trained_in_current_epoch = 0 # Check if continuing training from a checkpoint if os.path.exists(args.model_name_or_path): # set global_step to global_step of last saved checkpoint from model path try: global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0]) except ValueError: global_step = 0 epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps) logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", global_step) logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0], ) set_seed(args) # Added here for reproductibility for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue model.train() batch = tuple(t.to(args.device) for t in batch) threshold, regu_lambda = schedule_threshold( step=global_step, total_step=t_total, warmup_steps=args.warmup_steps, final_threshold=args.final_threshold, initial_threshold=args.initial_threshold, final_warmup=args.final_warmup, initial_warmup=args.initial_warmup, final_lambda=args.final_lambda, ) # Global TopK if args.global_topk: if threshold == 1.0: threshold = -1e2 # Or an indefinitely low quantity else: if (threshold_mem is None) or (global_step % args.global_topk_frequency_compute == 0): # Sort all the values to get the global topK concat = torch.cat( [param.view(-1) for name, param in model.named_parameters() if "mask_scores" in name] ) n = concat.numel() kth = max(n - (int(n * threshold) + 1), 1) threshold_mem = concat.kthvalue(kth).values.item() threshold = threshold_mem else: threshold = threshold_mem inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "masked_bert", "xlnet", "albert"] else None ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids if "masked" in args.model_type: inputs["threshold"] = threshold outputs = model(**inputs) loss, logits_stu = outputs # model outputs are always tuple in transformers (see doc) # Distillation loss if teacher is not None: if "token_type_ids" not in inputs: inputs["token_type_ids"] = None if args.teacher_type == "xlm" else batch[2] with torch.no_grad(): (logits_tea,) = teacher( input_ids=inputs["input_ids"], token_type_ids=inputs["token_type_ids"], attention_mask=inputs["attention_mask"], ) loss_logits = F.kl_div( 
input=F.log_softmax(logits_stu / args.temperature, dim=-1), target=F.softmax(logits_tea / args.temperature, dim=-1), reduction="batchmean", ) * (args.temperature ** 2) loss = args.alpha_distil * loss_logits + args.alpha_ce * loss # Regularization if args.regularization is not None: regu_ = regularization(model=model, mode=args.regularization) loss = loss + regu_lambda * regu_ if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0 or ( # last step in epoch but step is always smaller than gradient_accumulation_steps len(epoch_iterator) <= args.gradient_accumulation_steps and (step + 1) == len(epoch_iterator) ): if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: tb_writer.add_scalar("threshold", threshold, global_step) for name, param in model.named_parameters(): if not param.requires_grad: continue tb_writer.add_scalar("parameter_mean/" + name, param.data.mean(), global_step) tb_writer.add_scalar("parameter_std/" + name, param.data.std(), global_step) tb_writer.add_scalar("parameter_min/" + name, param.data.min(), global_step) tb_writer.add_scalar("parameter_max/" + name, param.data.max(), global_step) tb_writer.add_scalar("grad_mean/" + name, param.grad.data.mean(), global_step) tb_writer.add_scalar("grad_std/" + name, param.grad.data.std(), global_step) if args.regularization is not None and "mask_scores" in name: if args.regularization == "l1": perc = (torch.sigmoid(param) > threshold).sum().item() / param.numel() elif args.regularization == "l0": perc = (torch.sigmoid(param - 2 / 3 * np.log(0.1 / 1.1))).sum().item() / param.numel() tb_writer.add_scalar("retained_weights_perc/" + name, perc, global_step) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: logs = {} if ( args.local_rank == -1 and args.evaluate_during_training ): # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): eval_key = "eval_{}".format(key) logs[eval_key] = value loss_scalar = (tr_loss - logging_loss) / args.logging_steps learning_rate_scalar = scheduler.get_lr() logs["learning_rate"] = learning_rate_scalar[0] if len(learning_rate_scalar) > 1: for idx, lr in enumerate(learning_rate_scalar[1:]): logs[f"learning_rate/{idx+1}"] = lr logs["loss"] = loss_scalar if teacher is not None: logs["loss/distil"] = loss_logits.item() if args.regularization is not None: logs["loss/regularization"] = regu_.item() if (teacher is not None) or (args.regularization is not None): if (teacher is not None) and (args.regularization is not None): logs["loss/instant_ce"] = ( loss.item() - regu_lambda * logs["loss/regularization"] - args.alpha_distil * logs["loss/distil"] ) / args.alpha_ce elif teacher is not None: logs["loss/instant_ce"] = ( loss.item() - args.alpha_distil * logs["loss/distil"] ) / args.alpha_ce else: logs["loss/instant_ce"] = loss.item() - regu_lambda * 
logs["loss/regularization"] logging_loss = tr_loss for key, value in logs.items(): tb_writer.add_scalar(key, value, global_step) print(json.dumps({**logs, **{"step": global_step}})) if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): # Loop to handle MNLI double evaluation (matched, mis-matched) eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,) eval_outputs_dirs = (args.output_dir, args.output_dir + "/MM") if args.task_name == "mnli" else (args.output_dir,) results = {} for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu eval if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Eval! 
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds = None out_label_ids = None # Global TopK if args.global_topk: threshold_mem = None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "masked_bert", "xlnet", "albert"] else None ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids if "masked" in args.model_type: inputs["threshold"] = args.final_threshold if args.global_topk: if threshold_mem is None: concat = torch.cat( [param.view(-1) for name, param in model.named_parameters() if "mask_scores" in name] ) n = concat.numel() kth = max(n - (int(n * args.final_threshold) + 1), 1) threshold_mem = concat.kthvalue(kth).values.item() inputs["threshold"] = threshold_mem outputs = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs["labels"].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0) eval_loss = eval_loss / nb_eval_steps if args.output_mode == "classification": from scipy.special import softmax probs = softmax(preds, axis=-1) entropy = np.exp((-probs * np.log(probs)).sum(axis=-1).mean()) preds = np.argmax(preds, axis=1) elif args.output_mode == "regression": preds = np.squeeze(preds) result = compute_metrics(eval_task, preds, out_label_ids) results.update(result) if entropy is not None: result["eval_avg_entropy"] = entropy output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return results def load_and_cache_examples(args, task, tokenizer, evaluate=False): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache processor = processors[task]() output_mode = output_modes[task] # Load data features from cache or dataset file cached_features_file = os.path.join( args.data_dir, "cached_{}_{}_{}_{}".format( "dev" if evaluate else "train", list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), str(task), ), ) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) label_list = processor.get_labels() if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]: # HACK(label indices are swapped in RoBERTa pretrained model) label_list[1], label_list[2] = label_list[2], label_list[1] examples = ( processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir) ) features = convert_examples_to_features( examples, 
tokenizer, max_length=args.max_seq_length, label_list=label_list, output_mode=output_mode, ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0 and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) if output_mode == "classification": all_labels = torch.tensor([f.label for f in features], dtype=torch.long) elif output_mode == "regression": all_labels = torch.tensor([f.label for f in features], dtype=torch.float) dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels) return dataset def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.", ) parser.add_argument( "--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()), ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--task_name", default=None, type=str, required=True, help="The name of the task to train selected in the list: " + ", ".join(processors.keys()), ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", ) # Other parameters parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3", ) parser.add_argument( "--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. 
Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument( "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step.", ) parser.add_argument( "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.", ) parser.add_argument( "--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.", ) parser.add_argument( "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") # Pruning parameters parser.add_argument( "--mask_scores_learning_rate", default=1e-2, type=float, help="The Adam initial learning rate of the mask scores.", ) parser.add_argument( "--initial_threshold", default=1.0, type=float, help="Initial value of the threshold (for scheduling)." ) parser.add_argument( "--final_threshold", default=0.7, type=float, help="Final value of the threshold (for scheduling)." ) parser.add_argument( "--initial_warmup", default=1, type=int, help="Run `initial_warmup` * `warmup_steps` steps of threshold warmup during which threshold stays" "at its `initial_threshold` value (sparsity schedule).", ) parser.add_argument( "--final_warmup", default=2, type=int, help="Run `final_warmup` * `warmup_steps` steps of threshold cool-down during which threshold stays" "at its final_threshold value (sparsity schedule).", ) parser.add_argument( "--pruning_method", default="topK", type=str, help="Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning, sigmoied_threshold = Soft movement pruning).", ) parser.add_argument( "--mask_init", default="constant", type=str, help="Initialization method for the mask scores. Choices: constant, uniform, kaiming.", ) parser.add_argument( "--mask_scale", default=0.0, type=float, help="Initialization parameter for the chosen initialization method." ) parser.add_argument("--regularization", default=None, help="Add L0 or L1 regularization to the mask scores.") parser.add_argument( "--final_lambda", default=0.0, type=float, help="Regularization intensity (used in conjunction with `regulariation`.", ) parser.add_argument("--global_topk", action="store_true", help="Global TopK on the Scores.") parser.add_argument( "--global_topk_frequency_compute", default=25, type=int, help="Frequency at which we compute the TopK global threshold.", ) # Distillation parameters (optional) parser.add_argument( "--teacher_type", default=None, type=str, help="Teacher type. Teacher tokenizer and student (model) tokenizer must output the same tokenization. Only for distillation.", ) parser.add_argument( "--teacher_name_or_path", default=None, type=str, help="Path to the already fine-tuned teacher model. Only for distillation.", ) parser.add_argument( "--alpha_ce", default=0.5, type=float, help="Cross entropy loss linear weight. Only for distillation." ) parser.add_argument( "--alpha_distil", default=0.5, type=float, help="Distillation loss linear weight. Only for distillation." ) parser.add_argument( "--temperature", default=2.0, type=float, help="Distillation temperature. Only for distillation." 
) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.", ) parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.", ) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.") parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument( "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available") parser.add_argument( "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory", ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets", ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html", ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") args = parser.parse_args() # Regularization if args.regularization == "null": args.regularization = None if ( os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir ): raise ValueError( f"Output directory ({args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome." 
) # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set seed set_seed(args) # Prepare GLUE task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() args.output_mode = output_modes[args.task_name] label_list = processor.get_labels() num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name, cache_dir=args.cache_dir if args.cache_dir else None, pruning_method=args.pruning_method, mask_init=args.mask_init, mask_scale=args.mask_scale, ) tokenizer = tokenizer_class.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None, do_lower_case=args.do_lower_case, ) model = model_class.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None, ) if args.teacher_type is not None: assert args.teacher_name_or_path is not None assert args.alpha_distil > 0.0 assert args.alpha_distil + args.alpha_ce > 0.0 teacher_config_class, teacher_model_class, _ = MODEL_CLASSES[args.teacher_type] teacher_config = teacher_config_class.from_pretrained(args.teacher_name_or_path) teacher = teacher_model_class.from_pretrained( args.teacher_name_or_path, from_tf=False, config=teacher_config, cache_dir=args.cache_dir if args.cache_dir else None, ) teacher.to(args.device) else: teacher = None if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer, teacher=teacher) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and 
tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) model.to(args.device) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) ) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) result = evaluate(args, model, tokenizer, prefix=prefix) result = dict((k + "_{}".format(global_step), v) for k, v in result.items()) results.update(result) return results if __name__ == "__main__": main()
40,389
42.854506
156
py
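masked_run_glue.py drives pruning with the cubic sparsity schedule implemented by `schedule_threshold`: the threshold is held at `initial_threshold` through the initial warmup, decays with a cubic coefficient, and is clamped to `final_threshold` during the final warmup. A standalone sketch of that schedule with illustrative step counts (the default values below are examples, not the script's defaults):

# Sketch: the cubic sparsity schedule used during fine-pruning.
def cubic_threshold(step, total_step, warmup_steps,
                    initial_threshold=1.0, final_threshold=0.1,
                    initial_warmup=1, final_warmup=2):
    if step <= initial_warmup * warmup_steps:
        return initial_threshold                       # hold at the initial value
    if step > (total_step - final_warmup * warmup_steps):
        return final_threshold                         # clamp during the final warmup
    spars_warmup_steps = initial_warmup * warmup_steps
    spars_schedu_steps = (final_warmup + initial_warmup) * warmup_steps
    mul_coeff = 1 - (step - spars_warmup_steps) / (total_step - spars_schedu_steps)
    return final_threshold + (initial_threshold - final_threshold) * (mul_coeff ** 3)

for step in (0, 1000, 5000, 9000, 10000):
    print(step, round(cubic_threshold(step, total_step=10000, warmup_steps=1000), 3))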
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/movement-pruning/bertarize.py
# Copyright 2020-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Once a model has been fine-pruned, the weights that are masked during the forward pass can be pruned once for all. For instance, once the a model from the :class:`~emmental.MaskedBertForSequenceClassification` is trained, it can be saved (and then loaded) as a standard :class:`~transformers.BertForSequenceClassification`. """ import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def main(args): pruning_method = args.pruning_method threshold = args.threshold model_name_or_path = args.model_name_or_path.rstrip("/") target_model_path = args.target_model_path print(f"Load fine-pruned model from {model_name_or_path}") model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin")) pruned_model = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: pruned_model[name] = tensor print(f"Copied layer {name}") elif "classifier" in name or "qa_output" in name: pruned_model[name] = tensor print(f"Copied layer {name}") elif "bias" in name: pruned_model[name] = tensor print(f"Copied layer {name}") else: if pruning_method == "magnitude": mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold) pruned_model[name] = tensor * mask print(f"Pruned layer {name}") elif pruning_method == "topK": if "mask_scores" in name: continue prefix_ = name[:-6] scores = model[f"{prefix_}mask_scores"] mask = TopKBinarizer.apply(scores, threshold) pruned_model[name] = tensor * mask print(f"Pruned layer {name}") elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue prefix_ = name[:-6] scores = model[f"{prefix_}mask_scores"] mask = ThresholdBinarizer.apply(scores, threshold, True) pruned_model[name] = tensor * mask print(f"Pruned layer {name}") elif pruning_method == "l0": if "mask_scores" in name: continue prefix_ = name[:-6] scores = model[f"{prefix_}mask_scores"] l, r = -0.1, 1.1 s = torch.sigmoid(scores) s_bar = s * (r - l) + l mask = s_bar.clamp(min=0.0, max=1.0) pruned_model[name] = tensor * mask print(f"Pruned layer {name}") else: raise ValueError("Unknown pruning method") if target_model_path is None: target_model_path = os.path.join( os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}" ) if not os.path.isdir(target_model_path): shutil.copytree(model_name_or_path, target_model_path) print(f"\nCreated folder {target_model_path}") torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin")) print("\nPruned model saved! 
See you later!") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--pruning_method", choices=["l0", "magnitude", "topK", "sigmoied_threshold"], type=str, required=True, help="Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning, sigmoied_threshold = Soft movement pruning)", ) parser.add_argument( "--threshold", type=float, required=False, help="For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model." "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared." "Not needed for `l0`", ) parser.add_argument( "--model_name_or_path", type=str, required=True, help="Folder containing the model that was previously fine-pruned", ) parser.add_argument( "--target_model_path", default=None, type=str, required=False, help="Folder containing the model that was previously fine-pruned", ) args = parser.parse_args() main(args)
5,086
37.24812
155
py
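bertarize.py turns a fine-pruned masked checkpoint into a standard state dict by multiplying each maskable weight with the binary mask recovered from its `mask_scores`. A rough sketch of that offline step in plain PyTorch, with hypothetical tensor names and a simple sigmoid-threshold binarization standing in for the emmental binarizers:

# Sketch: prune weights once for all so the result loads as an ordinary BERT checkpoint.
import torch

state_dict = {
    "encoder.layer.0.attention.self.query.weight": torch.randn(768, 768),
    "encoder.layer.0.attention.self.query.mask_scores": torch.randn(768, 768),
}

pruned = {}
for name, tensor in state_dict.items():
    if name.endswith("mask_scores"):
        continue  # mask scores are consumed here, not kept in the pruned checkpoint
    scores = state_dict.get(name[: -len("weight")] + "mask_scores") if name.endswith(".weight") else None
    if scores is None:
        pruned[name] = tensor  # biases, LayerNorm, embeddings, etc. would be copied as-is
    else:
        mask = (torch.sigmoid(scores) > 0.5).float()  # sigmoid-threshold stand-in for the binarizers
        pruned[name] = tensor * mask

torch.save(pruned, "bertarized_pytorch_model.bin")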
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/movement-pruning/masked_run_squad.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-pruning Masked BERT for question-answering on SQuAD.""" import argparse import glob import logging import os import random import timeit import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from emmental import MaskedBertConfig, MaskedBertForQuestionAnswering from transformers import ( WEIGHTS_NAME, AdamW, BertConfig, BertForQuestionAnswering, BertTokenizer, get_linear_schedule_with_warmup, squad_convert_examples_to_features, ) from transformers.data.metrics.squad_metrics import ( compute_predictions_log_probs, compute_predictions_logits, squad_evaluate, ) from transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter logger = logging.getLogger(__name__) MODEL_CLASSES = { "bert": (BertConfig, BertForQuestionAnswering, BertTokenizer), "masked_bert": (MaskedBertConfig, MaskedBertForQuestionAnswering, BertTokenizer), } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def schedule_threshold( step: int, total_step: int, warmup_steps: int, initial_threshold: float, final_threshold: float, initial_warmup: int, final_warmup: int, final_lambda: float, ): if step <= initial_warmup * warmup_steps: threshold = initial_threshold elif step > (total_step - final_warmup * warmup_steps): threshold = final_threshold else: spars_warmup_steps = initial_warmup * warmup_steps spars_schedu_steps = (final_warmup + initial_warmup) * warmup_steps mul_coeff = 1 - (step - spars_warmup_steps) / (total_step - spars_schedu_steps) threshold = final_threshold + (initial_threshold - final_threshold) * (mul_coeff ** 3) regu_lambda = final_lambda * threshold / final_threshold return threshold, regu_lambda def regularization(model: nn.Module, mode: str): regu, counter = 0, 0 for name, param in model.named_parameters(): if "mask_scores" in name: if mode == "l1": regu += torch.norm(torch.sigmoid(param), p=1) / param.numel() elif mode == "l0": regu += torch.sigmoid(param - 2 / 3 * np.log(0.1 / 1.1)).sum() / param.numel() else: ValueError("Don't know this mode.") counter += 1 return regu / counter def to_list(tensor): return tensor.detach().cpu().tolist() def train(args, train_dataset, model, tokenizer, teacher=None): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter(log_dir=args.output_dir) args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else 
DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if "mask_score" in n and p.requires_grad], "lr": args.mask_scores_learning_rate, }, { "params": [ p for n, p in model.named_parameters() if "mask_score" not in n and p.requires_grad and not any(nd in n for nd in no_decay) ], "lr": args.learning_rate, "weight_decay": args.weight_decay, }, { "params": [ p for n, p in model.named_parameters() if "mask_score" not in n and p.requires_grad and any(nd in n for nd in no_decay) ], "lr": args.learning_rate, "weight_decay": 0.0, }, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # Check if saved optimizer or scheduler states exist if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile( os.path.join(args.model_name_or_path, "scheduler.pt") ): # Load in optimizer and scheduler states optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True, ) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. 
parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) # Distillation if teacher is not None: logger.info(" Training with distillation") global_step = 1 # Global TopK if args.global_topk: threshold_mem = None epochs_trained = 0 steps_trained_in_current_epoch = 0 # Check if continuing training from a checkpoint if os.path.exists(args.model_name_or_path): # set global_step to global_step of last saved checkpoint from model path try: checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0] global_step = int(checkpoint_suffix) epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps) logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", global_step) logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) except ValueError: logger.info(" Starting fine-tuning.") tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0] ) # Added here for reproductibility set_seed(args) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue model.train() batch = tuple(t.to(args.device) for t in batch) threshold, regu_lambda = schedule_threshold( step=global_step, total_step=t_total, warmup_steps=args.warmup_steps, final_threshold=args.final_threshold, initial_threshold=args.initial_threshold, final_warmup=args.final_warmup, initial_warmup=args.initial_warmup, final_lambda=args.final_lambda, ) # Global TopK if args.global_topk: if threshold == 1.0: threshold = -1e2 # Or an indefinitely low quantity else: if (threshold_mem is None) or (global_step % args.global_topk_frequency_compute == 0): # Sort all the values to get the global topK concat = torch.cat( [param.view(-1) for name, param in model.named_parameters() if "mask_scores" in name] ) n = concat.numel() kth = max(n - (int(n * threshold) + 1), 1) threshold_mem = concat.kthvalue(kth).values.item() threshold = threshold_mem else: threshold = threshold_mem inputs = { "input_ids": batch[0], "attention_mask": batch[1], "token_type_ids": batch[2], "start_positions": batch[3], "end_positions": batch[4], } if args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": batch[5], "p_mask": batch[6]}) if args.version_2_with_negative: inputs.update({"is_impossible": batch[7]}) if hasattr(model, "config") and hasattr(model.config, "lang2id"): inputs.update( {"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)} ) if "masked" in args.model_type: inputs["threshold"] = threshold outputs = model(**inputs) # model outputs are always tuple in transformers (see doc) loss, 
start_logits_stu, end_logits_stu = outputs # Distillation loss if teacher is not None: with torch.no_grad(): start_logits_tea, end_logits_tea = teacher( input_ids=inputs["input_ids"], token_type_ids=inputs["token_type_ids"], attention_mask=inputs["attention_mask"], ) loss_start = F.kl_div( input=F.log_softmax(start_logits_stu / args.temperature, dim=-1), target=F.softmax(start_logits_tea / args.temperature, dim=-1), reduction="batchmean", ) * (args.temperature ** 2) loss_end = F.kl_div( input=F.log_softmax(end_logits_stu / args.temperature, dim=-1), target=F.softmax(end_logits_tea / args.temperature, dim=-1), reduction="batchmean", ) * (args.temperature ** 2) loss_logits = (loss_start + loss_end) / 2.0 loss = args.alpha_distil * loss_logits + args.alpha_ce * loss # Regularization if args.regularization is not None: regu_ = regularization(model=model, mode=args.regularization) loss = loss + regu_lambda * regu_ if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: tb_writer.add_scalar("threshold", threshold, global_step) for name, param in model.named_parameters(): if not param.requires_grad: continue tb_writer.add_scalar("parameter_mean/" + name, param.data.mean(), global_step) tb_writer.add_scalar("parameter_std/" + name, param.data.std(), global_step) tb_writer.add_scalar("parameter_min/" + name, param.data.min(), global_step) tb_writer.add_scalar("parameter_max/" + name, param.data.max(), global_step) if "pooler" in name: continue tb_writer.add_scalar("grad_mean/" + name, param.grad.data.mean(), global_step) tb_writer.add_scalar("grad_std/" + name, param.grad.data.std(), global_step) if args.regularization is not None and "mask_scores" in name: if args.regularization == "l1": perc = (torch.sigmoid(param) > threshold).sum().item() / param.numel() elif args.regularization == "l0": perc = (torch.sigmoid(param - 2 / 3 * np.log(0.1 / 1.1))).sum().item() / param.numel() tb_writer.add_scalar("retained_weights_perc/" + name, perc, global_step) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 # Log metrics if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Only evaluate when single GPU otherwise metrics may not average well if args.local_rank == -1 and args.evaluate_during_training: results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar("eval_{}".format(key), value, global_step) learning_rate_scalar = scheduler.get_lr() tb_writer.add_scalar("lr", learning_rate_scalar[0], global_step) if len(learning_rate_scalar) > 1: for idx, lr in enumerate(learning_rate_scalar[1:]): tb_writer.add_scalar(f"lr/{idx+1}", lr, global_step) tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step) if teacher is not None: tb_writer.add_scalar("loss/distil", loss_logits.item(), global_step) if args.regularization is not None: tb_writer.add_scalar("loss/regularization", 
regu_.item(), global_step) if (teacher is not None) or (args.regularization is not None): if (teacher is not None) and (args.regularization is not None): tb_writer.add_scalar( "loss/instant_ce", (loss.item() - regu_lambda * regu_.item() - args.alpha_distil * loss_logits.item()) / args.alpha_ce, global_step, ) elif teacher is not None: tb_writer.add_scalar( "loss/instant_ce", (loss.item() - args.alpha_distil * loss_logits.item()) / args.alpha_ce, global_step, ) else: tb_writer.add_scalar( "loss/instant_ce", loss.item() - regu_lambda * regu_.item(), global_step ) logging_loss = tr_loss # Save model checkpoint if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) # Take care of distributed/parallel training model_to_save = model.module if hasattr(model, "module") else model model_to_save.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu eval if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Eval! 
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) all_results = [] start_time = timeit.default_timer() # Global TopK if args.global_topk: threshold_mem = None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = { "input_ids": batch[0], "attention_mask": batch[1], "token_type_ids": batch[2], } if args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] example_indices = batch[3] # XLNet and XLM use more arguments for their predictions if args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": batch[4], "p_mask": batch[5]}) # for lang_id-sensitive xlm models if hasattr(model, "config") and hasattr(model.config, "lang2id"): inputs.update( {"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)} ) if "masked" in args.model_type: inputs["threshold"] = args.final_threshold if args.global_topk: if threshold_mem is None: concat = torch.cat( [param.view(-1) for name, param in model.named_parameters() if "mask_scores" in name] ) n = concat.numel() kth = max(n - (int(n * args.final_threshold) + 1), 1) threshold_mem = concat.kthvalue(kth).values.item() inputs["threshold"] = threshold_mem outputs = model(**inputs) for i, example_index in enumerate(example_indices): eval_feature = features[example_index.item()] unique_id = int(eval_feature.unique_id) output = [to_list(output[i]) for output in outputs] # Some models (XLNet, XLM) use 5 arguments for their predictions, while the other "simpler" # models only use two. if len(output) >= 5: start_logits = output[0] start_top_index = output[1] end_logits = output[2] end_top_index = output[3] cls_logits = output[4] result = SquadResult( unique_id, start_logits, end_logits, start_top_index=start_top_index, end_top_index=end_top_index, cls_logits=cls_logits, ) else: start_logits, end_logits = output result = SquadResult(unique_id, start_logits, end_logits) all_results.append(result) evalTime = timeit.default_timer() - start_time logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset)) # Compute predictions output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix)) output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix)) if args.version_2_with_negative: output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix)) else: output_null_log_odds_file = None # XLNet and XLM use a more complex post-processing procedure if args.model_type in ["xlnet", "xlm"]: start_n_top = model.config.start_n_top if hasattr(model, "config") else model.module.config.start_n_top end_n_top = model.config.end_n_top if hasattr(model, "config") else model.module.config.end_n_top predictions = compute_predictions_log_probs( examples, features, all_results, args.n_best_size, args.max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, start_n_top, end_n_top, args.version_2_with_negative, tokenizer, args.verbose_logging, ) else: predictions = compute_predictions_logits( examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.verbose_logging, args.version_2_with_negative, args.null_score_diff_threshold, tokenizer, ) # 
Compute the F1 and exact scores. results = squad_evaluate(examples, predictions) return results def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): if args.local_rank not in [-1, 0] and not evaluate: # Make sure only the first process in distributed training process the dataset, and the others will use the cache torch.distributed.barrier() # Load data features from cache or dataset file input_dir = args.data_dir if args.data_dir else "." cached_features_file = os.path.join( input_dir, "cached_{}_{}_{}_{}".format( "dev" if evaluate else "train", args.tokenizer_name if args.tokenizer_name else list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), list(filter(None, args.predict_file.split("/"))).pop() if evaluate else list(filter(None, args.train_file.split("/"))).pop(), ), ) # Init features and dataset from cache if it exists if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features_and_dataset = torch.load(cached_features_file) features, dataset, examples = ( features_and_dataset["features"], features_and_dataset["dataset"], features_and_dataset["examples"], ) else: logger.info("Creating features from dataset file at %s", input_dir) if not args.data_dir and ((evaluate and not args.predict_file) or (not evaluate and not args.train_file)): try: import tensorflow_datasets as tfds except ImportError: raise ImportError("If not data_dir is specified, tensorflow_datasets needs to be installed.") if args.version_2_with_negative: logger.warn("tensorflow_datasets does not handle version 2 of SQuAD.") tfds_examples = tfds.load("squad") examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate) else: processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor() if evaluate: examples = processor.get_dev_examples(args.data_dir, filename=args.predict_file) else: examples = processor.get_train_examples(args.data_dir, filename=args.train_file) features, dataset = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=not evaluate, return_dataset="pt", threads=args.threads, ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save({"features": features, "dataset": dataset, "examples": examples}, cached_features_file) if args.local_rank == 0 and not evaluate: # Make sure only the first process in distributed training process the dataset, and the others will use the cache torch.distributed.barrier() if output_examples: return dataset, examples, features return dataset def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()), ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.", ) # Other parameters parser.add_argument( "--data_dir", default=None, type=str, help="The input data dir. Should contain the .json files for the task." 
+ "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--train_file", default=None, type=str, help="The input training file. If a data dir is specified, will look for the file there" + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--predict_file", default=None, type=str, help="The input evaluation file. If a data dir is specified, will look for the file there" + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3", ) parser.add_argument( "--version_2_with_negative", action="store_true", help="If true, the SQuAD examples contain some that do not have an answer.", ) parser.add_argument( "--null_score_diff_threshold", type=float, default=0.0, help="If null_score - best_non_null is greater than the threshold predict null.", ) parser.add_argument( "--max_seq_length", default=384, type=int, help="The maximum total input sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.", ) parser.add_argument( "--doc_stride", default=128, type=int, help="When splitting up a long document into chunks, how much stride to take between chunks.", ) parser.add_argument( "--max_query_length", default=64, type=int, help="The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length.", ) parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument( "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step." ) parser.add_argument( "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model." ) parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument( "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation." ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") # Pruning parameters parser.add_argument( "--mask_scores_learning_rate", default=1e-2, type=float, help="The Adam initial learning rate of the mask scores.", ) parser.add_argument( "--initial_threshold", default=1.0, type=float, help="Initial value of the threshold (for scheduling)." ) parser.add_argument( "--final_threshold", default=0.7, type=float, help="Final value of the threshold (for scheduling)." 
) parser.add_argument( "--initial_warmup", default=1, type=int, help="Run `initial_warmup` * `warmup_steps` steps of threshold warmup during which threshold stays" "at its `initial_threshold` value (sparsity schedule).", ) parser.add_argument( "--final_warmup", default=2, type=int, help="Run `final_warmup` * `warmup_steps` steps of threshold cool-down during which threshold stays" "at its final_threshold value (sparsity schedule).", ) parser.add_argument( "--pruning_method", default="topK", type=str, help="Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning, sigmoied_threshold = Soft movement pruning).", ) parser.add_argument( "--mask_init", default="constant", type=str, help="Initialization method for the mask scores. Choices: constant, uniform, kaiming.", ) parser.add_argument( "--mask_scale", default=0.0, type=float, help="Initialization parameter for the chosen initialization method." ) parser.add_argument("--regularization", default=None, help="Add L0 or L1 regularization to the mask scores.") parser.add_argument( "--final_lambda", default=0.0, type=float, help="Regularization intensity (used in conjunction with `regulariation`.", ) parser.add_argument("--global_topk", action="store_true", help="Global TopK on the Scores.") parser.add_argument( "--global_topk_frequency_compute", default=25, type=int, help="Frequency at which we compute the TopK global threshold.", ) # Distillation parameters (optional) parser.add_argument( "--teacher_type", default=None, type=str, help="Teacher type. Teacher tokenizer and student (model) tokenizer must output the same tokenization. Only for distillation.", ) parser.add_argument( "--teacher_name_or_path", default=None, type=str, help="Path to the already SQuAD fine-tuned teacher model. Only for distillation.", ) parser.add_argument( "--alpha_ce", default=0.5, type=float, help="Cross entropy loss linear weight. Only for distillation." ) parser.add_argument( "--alpha_distil", default=0.5, type=float, help="Distillation loss linear weight. Only for distillation." ) parser.add_argument( "--temperature", default=2.0, type=float, help="Distillation temperature. Only for distillation." ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.", ) parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.", ) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument( "--n_best_size", default=20, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json output file.", ) parser.add_argument( "--max_answer_length", default=30, type=int, help="The maximum length of an answer that can be generated. 
This is needed because the start " "and end predictions are not conditioned on one another.", ) parser.add_argument( "--verbose_logging", action="store_true", help="If true, all of the warnings related to data processing will be printed. " "A number of warnings are expected for a normal SQuAD evaluation.", ) parser.add_argument( "--lang_id", default=0, type=int, help="language id of input for language-specific xlm models (see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)", ) parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.") parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.") parser.add_argument( "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available") parser.add_argument( "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html", ) parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.") parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.") parser.add_argument("--threads", type=int, default=1, help="multiple threads for converting example to features") args = parser.parse_args() # Regularization if args.regularization == "null": args.regularization = None if args.doc_stride >= args.max_seq_length - args.max_query_length: logger.warning( "WARNING - You've set a doc stride which may be superior to the document length in some " "examples. This could result in errors when building features from the examples. Please reduce the doc " "stride or increase the maximum length to ensure the features are correctly built." ) if ( os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir ): raise ValueError( "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( args.output_dir ) ) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: # Make sure only the first process in distributed training will download model & vocab torch.distributed.barrier() args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None, pruning_method=args.pruning_method, mask_init=args.mask_init, mask_scale=args.mask_scale, ) tokenizer = tokenizer_class.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None, ) model = model_class.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None, ) if args.teacher_type is not None: assert args.teacher_name_or_path is not None assert args.alpha_distil > 0.0 assert args.alpha_distil + args.alpha_ce > 0.0 teacher_config_class, teacher_model_class, _ = MODEL_CLASSES[args.teacher_type] teacher_config = teacher_config_class.from_pretrained(args.teacher_name_or_path) teacher = teacher_model_class.from_pretrained( args.teacher_name_or_path, from_tf=False, config=teacher_config, cache_dir=args.cache_dir if args.cache_dir else None, ) teacher.to(args.device) else: teacher = None if args.local_rank == 0: # Make sure only the first process in distributed training will download model & vocab torch.distributed.barrier() model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set. # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will # remove the need for this code, but it is still valid. 
if args.fp16: try: import apex apex.amp.register_half_function(torch, "einsum") except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") # Training if args.do_train: train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer, teacher=teacher) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Save the trained model and the tokenizer if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` # Take care of distributed/parallel training model_to_save = model.module if hasattr(model, "module") else model model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) # , force_download=True) tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) model.to(args.device) # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory results = {} if args.do_eval and args.local_rank in [-1, 0]: if args.do_train: logger.info("Loading checkpoints saved during training for evaluation") checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) ) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs else: logger.info("Loading checkpoint %s for evaluation", args.model_name_or_path) checkpoints = [args.model_name_or_path] logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: # Reload the model global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" model = model_class.from_pretrained(checkpoint) # , force_download=True) model.to(args.device) # Evaluate result = evaluate(args, model, tokenizer, prefix=global_step) result = dict((k + ("_{}".format(global_step) if global_step else ""), v) for k, v in result.items()) results.update(result) logger.info("Results: {}".format(results)) predict_file = list(filter(None, args.predict_file.split("/"))).pop() if not os.path.exists(os.path.join(args.output_dir, predict_file)): os.makedirs(os.path.join(args.output_dir, predict_file)) output_eval_file = os.path.join(args.output_dir, predict_file, "eval_results.txt") with open(output_eval_file, "w") as writer: for key in sorted(results.keys()): writer.write("%s = %s\n" % (key, str(results[key]))) return results if __name__ == "__main__": main()
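For reference, a minimal standalone sketch of the distillation objective used in the training loop above: a temperature-scaled KL term between student and teacher start/end logits, averaged and mixed with the QA cross-entropy through alpha_distil/alpha_ce. The helper name and the dummy tensors are illustrative only and are not part of the script.

import torch
import torch.nn.functional as F

def distillation_objective(start_stu, end_stu, start_tea, end_tea, ce_loss,
                           temperature=2.0, alpha_distil=0.5, alpha_ce=0.5):
    # Soften both start-logit distributions with T and penalise the student for
    # diverging from the teacher; the T**2 factor keeps gradient magnitudes
    # comparable across temperatures.
    loss_start = F.kl_div(
        input=F.log_softmax(start_stu / temperature, dim=-1),
        target=F.softmax(start_tea / temperature, dim=-1),
        reduction="batchmean",
    ) * (temperature ** 2)
    # Same term for the end-position logits.
    loss_end = F.kl_div(
        input=F.log_softmax(end_stu / temperature, dim=-1),
        target=F.softmax(end_tea / temperature, dim=-1),
        reduction="batchmean",
    ) * (temperature ** 2)
    loss_logits = (loss_start + loss_end) / 2.0
    # Linear mix of distillation and task loss, as in the loop above.
    return alpha_distil * loss_logits + alpha_ce * ce_loss

# Dummy batch of 4 examples with 384-token contexts:
start_stu, end_stu = torch.randn(4, 384), torch.randn(4, 384)
start_tea, end_tea = torch.randn(4, 384), torch.randn(4, 384)
print(distillation_objective(start_stu, end_stu, start_tea, end_tea, ce_loss=torch.tensor(1.2)))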
47622
41.444742
156
py
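The --initial_threshold/--final_threshold/--initial_warmup/--final_warmup arguments above describe a sparsity schedule for the masking threshold, but the scheduling function itself falls outside this excerpt. Below is a hedged sketch consistent with the help strings, assuming the commonly used cubic interpolation between the two plateaus; treat the exact curve as an assumption.

def schedule_threshold_sketch(step, total_steps, warmup_steps,
                              initial_threshold=1.0, final_threshold=0.7,
                              initial_warmup=1, final_warmup=2):
    # Plateau at the initial threshold during the warm-up phase...
    if step <= initial_warmup * warmup_steps:
        return initial_threshold
    # ...and at the final threshold during the cool-down phase.
    if step > total_steps - final_warmup * warmup_steps:
        return final_threshold
    # In between, decay from initial to final along a cubic curve.
    span = total_steps - (initial_warmup + final_warmup) * warmup_steps
    progress = 1.0 - (step - initial_warmup * warmup_steps) / span
    return final_threshold + (initial_threshold - final_threshold) * progress ** 3

# schedule_threshold_sketch(5000, total_steps=20000, warmup_steps=1000) ≈ 0.83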
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/movement-pruning/emmental/modeling_bert_masked.py
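The file below builds every attention and feed-forward projection from emmental.modules.MaskedLinear, which is not reproduced in this excerpt; the Masked* heads further down simply forward a `threshold` argument to these layers. As a rough illustration only (not the emmental implementation, which additionally uses a straight-through estimator so the scores keep receiving gradients through the binarized mask), a top-K masked linear layer can be sketched as follows.

import torch
import torch.nn as nn
import torch.nn.functional as F

class TopKMaskedLinearSketch(nn.Linear):
    """Keep only the weights whose learned scores fall in the top `threshold` fraction."""

    def __init__(self, in_features, out_features, bias=True):
        super().__init__(in_features, out_features, bias=bias)
        # One learnable score per weight; movement pruning trains these scores
        # jointly with the weights and prunes by score rather than by magnitude.
        self.mask_scores = nn.Parameter(torch.zeros_like(self.weight))

    def forward(self, x, threshold=1.0):
        flat = self.mask_scores.flatten()
        k = max(1, int(threshold * flat.numel()))
        kth_smallest = flat.numel() - k + 1  # rank of the k-th largest score
        cutoff = torch.kthvalue(flat, kth_smallest).values
        mask = (self.mask_scores >= cutoff).to(self.weight.dtype)
        return F.linear(x, self.weight * mask, self.bias)

# layer = TopKMaskedLinearSketch(768, 768)
# y = layer(torch.randn(2, 128, 768), threshold=0.7)  # roughly 70% of the weights kept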
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Masked Version of BERT. It replaces the `torch.nn.Linear` layers with :class:`~emmental.MaskedLinear` and add an additional parameters in the forward pass to compute the adaptive mask. Built on top of `transformers.modeling_bert`""" import logging import math import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from emmental import MaskedBertConfig from emmental.modules import MaskedLinear from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_callable from transformers.modeling_bert import ACT2FN, BertLayerNorm, load_tf_weights_in_bert from transformers.modeling_utils import PreTrainedModel, prune_linear_layer logger = logging.getLogger(__name__) class BertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings. """ def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] device = input_ids.device if input_ids is not None else inputs_embeds.device if position_ids is None: position_ids = torch.arange(seq_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0).expand(input_shape) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads) ) self.output_attentions = config.output_attentions self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / 
config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = MaskedLinear( config.hidden_size, self.all_head_size, pruning_method=config.pruning_method, mask_init=config.mask_init, mask_scale=config.mask_scale, ) self.key = MaskedLinear( config.hidden_size, self.all_head_size, pruning_method=config.pruning_method, mask_init=config.mask_init, mask_scale=config.mask_scale, ) self.value = MaskedLinear( config.hidden_size, self.all_head_size, pruning_method=config.pruning_method, mask_init=config.mask_init, mask_scale=config.mask_scale, ) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, threshold=None, ): mixed_query_layer = self.query(hidden_states, threshold=threshold) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. if encoder_hidden_states is not None: mixed_key_layer = self.key(encoder_hidden_states, threshold=threshold) mixed_value_layer = self.value(encoder_hidden_states, threshold=threshold) attention_mask = encoder_attention_mask else: mixed_key_layer = self.key(hidden_states, threshold=threshold) mixed_value_layer = self.value(hidden_states, threshold=threshold) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,) return outputs class BertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = MaskedLinear( config.hidden_size, config.hidden_size, pruning_method=config.pruning_method, mask_init=config.mask_init, mask_scale=config.mask_scale, ) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor, threshold): hidden_states = self.dense(hidden_states, threshold=threshold) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config): super().__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size) heads = set(heads) - self.pruned_heads # Convert to set and remove already pruned heads for head in heads: # Compute how many pruned heads are before the head and move the index accordingly head = head - sum(1 if h < head else 0 for h in self.pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index = torch.arange(len(mask))[mask].long() # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, threshold=None, ): self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, threshold=threshold, ) attention_output = self.output(self_outputs[0], hidden_states, threshold=threshold) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class BertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = MaskedLinear( config.hidden_size, config.intermediate_size, pruning_method=config.pruning_method, mask_init=config.mask_init, mask_scale=config.mask_scale, ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states, threshold): hidden_states = self.dense(hidden_states, threshold=threshold) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BertOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = MaskedLinear( config.intermediate_size, config.hidden_size, 
pruning_method=config.pruning_method, mask_init=config.mask_init, mask_scale=config.mask_scale, ) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor, threshold): hidden_states = self.dense(hidden_states, threshold=threshold) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertLayer(nn.Module): def __init__(self, config): super().__init__() self.attention = BertAttention(config) self.is_decoder = config.is_decoder if self.is_decoder: self.crossattention = BertAttention(config) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, threshold=None, ): self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, threshold=threshold) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights if self.is_decoder and encoder_hidden_states is not None: cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights intermediate_output = self.intermediate(attention_output, threshold=threshold) layer_output = self.output(intermediate_output, attention_output, threshold=threshold) outputs = (layer_output,) + outputs return outputs class BertEncoder(nn.Module): def __init__(self, config): super().__init__() self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)]) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, threshold=None, ): all_hidden_states = () all_attentions = () for i, layer_module in enumerate(self.layer): if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask, threshold=threshold, ) hidden_states = layer_outputs[0] if self.output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states,) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: outputs = outputs + (all_attentions,) return outputs # last-layer hidden state, (all hidden states), (all attentions) class BertPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class MaskedBertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = MaskedBertConfig load_tf_weights = load_tf_weights_in_bert base_model_prefix = "bert" def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, BertLayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() MASKED_BERT_START_DOCSTRING = r""" This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~emmental.MaskedBertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ MASKED_BERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`transformers.BertTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. 
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. """ @add_start_docstrings( "The bare Masked Bert Model transformer outputting raw hidden-states without any specific head on top.", MASKED_BERT_START_DOCSTRING, ) class MaskedBertModel(MaskedBertPreTrainedModel): """ The `MaskedBertModel` class replicates the :class:`~transformers.BertModel` class and adds specific inputs to compute the adaptive mask on the fly. Note that we freeze the embeddings modules from their pre-trained values. """ def __init__(self, config): super().__init__(config) self.config = config self.embeddings = BertEmbeddings(config) self.embeddings.requires_grad_(requires_grad=False) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_callable(MASKED_BERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, threshold=None, ): r""" threshold (:obj:`float`): Threshold value (see :class:`~emmental.MaskedLinear`). Return: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pre-training. This output is usually *not* a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: # Provided a padding mask of dimensions [batch_size, seq_length] # - if the model is a decoder, apply a causal mask in addition to the padding mask # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder: batch_size, seq_length = input_shape seq_ids = torch.arange(seq_length, device=device) causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] causal_mask = causal_mask.to( attention_mask.dtype ) # causal and attention masks must have same type with pytorch version < 1.3 extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] else: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError( "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( input_shape, attention_mask.shape ) ) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastabe to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) if encoder_attention_mask.dim() == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] elif encoder_attention_mask.dim() == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] else: raise ValueError( "Wrong shape for encoder_hidden_shape (shape {}) or encoder_attention_mask (shape {})".format( encoder_hidden_shape, encoder_attention_mask.shape ) ) encoder_extended_attention_mask = encoder_extended_attention_mask.to( dtype=next(self.parameters()).dtype ) # fp16 compatibility encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = ( head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) ) # We can specify head_mask for each layer head_mask = head_mask.to( dtype=next(self.parameters()).dtype ) # switch to fload if need + fp16 compatibility else: head_mask = [None] * self.config.num_hidden_layers embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, threshold=threshold, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) outputs = (sequence_output, pooled_output,) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions) @add_start_docstrings( """Masked Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", MASKED_BERT_START_DOCSTRING, ) class MaskedBertForSequenceClassification(MaskedBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bert = MaskedBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) self.init_weights() @add_start_docstrings_to_callable(MASKED_BERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, threshold=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). threshold (:obj:`float`): Threshold value (see :class:`~emmental.MaskedLinear`). Returns: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided): Classification (or regression if config.num_labels==1) loss. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, threshold=threshold, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) outputs = (loss,) + outputs return outputs # (loss), logits, (hidden_states), (attentions) @add_start_docstrings( """Masked Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", MASKED_BERT_START_DOCSTRING, ) class MaskedBertForMultipleChoice(MaskedBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.bert = MaskedBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.init_weights() @add_start_docstrings_to_callable(MASKED_BERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, threshold=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`): Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension of the input tensors. (see `input_ids` above) threshold (:obj:`float`): Threshold value (see :class:`~emmental.MaskedLinear`). Returns: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs: loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided): Classification loss. classification_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`): `num_choices` is the second dimension of the input tensors. (see `input_ids` above). Classification scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ num_choices = input_ids.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, threshold=threshold, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) outputs = (loss,) + outputs return outputs # (loss), reshaped_logits, (hidden_states), (attentions) @add_start_docstrings( """Masked Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", MASKED_BERT_START_DOCSTRING, ) class MaskedBertForTokenClassification(MaskedBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bert = MaskedBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() @add_start_docstrings_to_callable(MASKED_BERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, threshold=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. threshold (:obj:`float`): Threshold value (see :class:`~emmental.MaskedLinear`). Returns: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) : Classification loss. scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`) Classification scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, threshold=threshold, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) outputs = (loss,) + outputs return outputs # (loss), scores, (hidden_states), (attentions) @add_start_docstrings( """Masked Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", MASKED_BERT_START_DOCSTRING, ) class MaskedBertForQuestionAnswering(MaskedBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bert = MaskedBertModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() @add_start_docstrings_to_callable(MASKED_BERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, threshold=None, ): r""" start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. threshold (:obj:`float`): Threshold value (see :class:`~emmental.MaskedLinear`). Returns: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`): Span-start scores (before SoftMax). end_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`): Span-end scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, threshold=threshold, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) outputs = (start_logits, end_logits,) + outputs[2:] if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 outputs = (total_loss,) + outputs return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
46,960
45.130648
154
py
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/movement-pruning/emmental/modules/masked_nn.py
# coding=utf-8 # Copyright 2020-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Masked Linear module: A fully connected layer that computes an adaptive binary mask on the fly. The mask (binary or not) is computed at each forward pass and multiplied against the weight matrix to prune a portion of the weights. The pruned weight matrix is then multiplied against the inputs (and if necessary, the bias is added). """ import math import torch from torch import nn from torch.nn import functional as F from torch.nn import init from .binarizer import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer class MaskedLinear(nn.Linear): """ Fully Connected layer with on the fly adaptive mask. If needed, a score matrix is created to store the importance of each associated weight. """ def __init__( self, in_features: int, out_features: int, bias: bool = True, mask_init: str = "constant", mask_scale: float = 0.0, pruning_method: str = "topK", ): """ Args: in_features (`int`) Size of each input sample out_features (`int`) Size of each output sample bias (`bool`) If set to ``False``, the layer will not learn an additive bias. Default: ``True`` mask_init (`str`) The initialization method for the score matrix if a score matrix is needed. Choices: ["constant", "uniform", "kaiming"] Default: ``constant`` mask_scale (`float`) The initialization parameter for the chosen initialization method `mask_init`. Default: ``0.`` pruning_method (`str`) Method to compute the mask. 
Choices: ["topK", "threshold", "sigmoied_threshold", "magnitude", "l0"] Default: ``topK`` """ super(MaskedLinear, self).__init__(in_features=in_features, out_features=out_features, bias=bias) assert pruning_method in ["topK", "threshold", "sigmoied_threshold", "magnitude", "l0"] self.pruning_method = pruning_method if self.pruning_method in ["topK", "threshold", "sigmoied_threshold", "l0"]: self.mask_scale = mask_scale self.mask_init = mask_init self.mask_scores = nn.Parameter(torch.Tensor(self.weight.size())) self.init_mask() def init_mask(self): if self.mask_init == "constant": init.constant_(self.mask_scores, val=self.mask_scale) elif self.mask_init == "uniform": init.uniform_(self.mask_scores, a=-self.mask_scale, b=self.mask_scale) elif self.mask_init == "kaiming": init.kaiming_uniform_(self.mask_scores, a=math.sqrt(5)) def forward(self, input: torch.tensor, threshold: float): # Get the mask if self.pruning_method == "topK": mask = TopKBinarizer.apply(self.mask_scores, threshold) elif self.pruning_method in ["threshold", "sigmoied_threshold"]: sig = "sigmoied" in self.pruning_method mask = ThresholdBinarizer.apply(self.mask_scores, threshold, sig) elif self.pruning_method == "magnitude": mask = MagnitudeBinarizer.apply(self.weight, threshold) elif self.pruning_method == "l0": l, r, b = -0.1, 1.1, 2 / 3 if self.training: u = torch.zeros_like(self.mask_scores).uniform_().clamp(0.0001, 0.9999) s = torch.sigmoid((u.log() - (1 - u).log() + self.mask_scores) / b) else: s = torch.sigmoid(self.mask_scores) s_bar = s * (r - l) + l mask = s_bar.clamp(min=0.0, max=1.0) # Mask weights with computed mask weight_thresholded = mask * self.weight # Compute output (linear layer) with masked weights return F.linear(input, weight_thresholded, self.bias)
4,532
40.972222
105
py
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/movement-pruning/emmental/modules/binarizer.py
# coding=utf-8 # Copyright 2020-present, AllenAI Authors, University of Illinois Urbana-Champaign, # Intel Nervana Systems and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Binarizers take a (real value) matrice as input and produce a binary (values in {0,1}) mask of the same shape. """ import torch from torch import autograd class ThresholdBinarizer(autograd.Function): """ Thresholdd binarizer. Computes a binary mask M from a real value matrix S such that `M_{i,j} = 1` if and only if `S_{i,j} > \tau` where `\tau` is a real value threshold. Implementation is inspired from: https://github.com/arunmallya/piggyback Piggyback: Adapting a Single Network to Multiple Tasks by Learning to Mask Weights Arun Mallya, Dillon Davis, Svetlana Lazebnik """ @staticmethod def forward(ctx, inputs: torch.tensor, threshold: float, sigmoid: bool): """ Args: inputs (`torch.FloatTensor`) The input matrix from which the binarizer computes the binary mask. threshold (`float`) The threshold value (in R). sigmoid (`bool`) If set to ``True``, we apply the sigmoid function to the `inputs` matrix before comparing to `threshold`. In this case, `threshold` should be a value between 0 and 1. Returns: mask (`torch.FloatTensor`) Binary matrix of the same size as `inputs` acting as a mask (1 - the associated weight is retained, 0 - the associated weight is pruned). """ nb_elems = inputs.numel() nb_min = int(0.005 * nb_elems) + 1 if sigmoid: mask = (torch.sigmoid(inputs) > threshold).type(inputs.type()) else: mask = (inputs > threshold).type(inputs.type()) if mask.sum() < nb_min: # We limit the pruning so that at least 0.5% (half a percent) of the weights are remaining k_threshold = inputs.flatten().kthvalue(max(nb_elems - nb_min, 1)).values mask = (inputs > k_threshold).type(inputs.type()) return mask @staticmethod def backward(ctx, gradOutput): return gradOutput, None, None class TopKBinarizer(autograd.Function): """ Top-k Binarizer. Computes a binary mask M from a real value matrix S such that `M_{i,j} = 1` if and only if `S_{i,j}` is among the k% highest values of S. Implementation is inspired from: https://github.com/allenai/hidden-networks What's hidden in a randomly weighted neural network? Vivek Ramanujan*, Mitchell Wortsman*, Aniruddha Kembhavi, Ali Farhadi, Mohammad Rastegari """ @staticmethod def forward(ctx, inputs: torch.tensor, threshold: float): """ Args: inputs (`torch.FloatTensor`) The input matrix from which the binarizer computes the binary mask. threshold (`float`) The percentage of weights to keep (the rest is pruned). `threshold` is a float between 0 and 1. Returns: mask (`torch.FloatTensor`) Binary matrix of the same size as `inputs` acting as a mask (1 - the associated weight is retained, 0 - the associated weight is pruned). """ # Get the subnetwork by sorting the inputs and using the top threshold % mask = inputs.clone() _, idx = inputs.flatten().sort(descending=True) j = int(threshold * inputs.numel()) # flat_out and mask access the same memory. 
flat_out = mask.flatten() flat_out[idx[j:]] = 0 flat_out[idx[:j]] = 1 return mask @staticmethod def backward(ctx, gradOutput): return gradOutput, None class MagnitudeBinarizer(object): """ Magnitude Binarizer. Computes a binary mask M from a real value matrix S such that `M_{i,j} = 1` if and only if `S_{i,j}` is among the k% highest values of |S| (absolute value). Implementation is inspired from https://github.com/NervanaSystems/distiller/blob/2291fdcc2ea642a98d4e20629acb5a9e2e04b4e6/distiller/pruning/automated_gradual_pruner.py#L24 """ @staticmethod def apply(inputs: torch.tensor, threshold: float): """ Args: inputs (`torch.FloatTensor`) The input matrix from which the binarizer computes the binary mask. This input marix is typically the weight matrix. threshold (`float`) The percentage of weights to keep (the rest is pruned). `threshold` is a float between 0 and 1. Returns: mask (`torch.FloatTensor`) Binary matrix of the same size as `inputs` acting as a mask (1 - the associated weight is retained, 0 - the associated weight is pruned). """ # Get the subnetwork by sorting the inputs and using the top threshold % mask = inputs.clone() _, idx = inputs.abs().flatten().sort(descending=True) j = int(threshold * inputs.numel()) # flat_out and mask access the same memory. flat_out = mask.flatten() flat_out[idx[j:]] = 0 flat_out[idx[:j]] = 1 return mask
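# --- Hedged usage sketch (not part of the original file) ---
# Applying the binarizers defined above to a random score matrix, outside of
# MaskedLinear. Assumes ThresholdBinarizer and TopKBinarizer from this file are
# in scope; the sizes and thresholds are illustrative.
import torch

scores = torch.randn(4, 4, requires_grad=True)

topk_mask = TopKBinarizer.apply(scores, 0.25)              # keep the top 25% of scores
thresh_mask = ThresholdBinarizer.apply(scores, 0.5, True)  # sigmoid(scores) > 0.5

print(topk_mask.sum().item())  # 4.0 -> 25% of the 16 entries survive
print(thresh_mask.unique())    # values in {0., 1.}

# Both backwards are straight-through: the incoming gradient is handed to the
# score matrix unchanged, so the scores stay trainable.
(topk_mask * scores).sum().backward()
print(scores.grad.shape)       # torch.Size([4, 4])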
5,823
39.165517
175
py
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/seq2seq/test_seq2seq_examples.py
import argparse import logging import os import sys import tempfile import unittest from pathlib import Path from unittest.mock import patch import pytest import torch from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.testing_utils import require_multigpu from .distillation import distill_main, evaluate_checkpoint from .finetune import main from .run_eval import generate_summaries_or_translations, run_generate from .utils import SummarizationDataset, lmap, load_json logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() CUDA_AVAILABLE = torch.cuda.is_available() CHEAP_ARGS = { "logger": "default", "length_penalty": 0.5, "cache_dir": "", "task": "summarization", "num_workers": 2, "alpha_hid": 0, "freeze_embeds": True, "enc_only": False, "tgt_suffix": "", "resume_from_checkpoint": None, "sortish_sampler": True, "student_decoder_layers": 1, "val_check_interval": 1.0, "output_dir": "", "fp16": CUDA_AVAILABLE, "no_teacher": False, "fp16_opt_level": "O1", "gpus": 1 if CUDA_AVAILABLE else 0, "n_tpu_cores": 0, "max_grad_norm": 1.0, "do_train": True, "do_predict": True, "gradient_accumulation_steps": 1, "server_ip": "", "server_port": "", "seed": 42, "model_name_or_path": "sshleifer/bart-tiny-random", "config_name": "", "tokenizer_name": "facebook/bart-large", "do_lower_case": False, "learning_rate": 0.3, "weight_decay": 0.0, "adam_epsilon": 1e-08, "warmup_steps": 0, "num_train_epochs": 1, "train_batch_size": 2, "eval_batch_size": 2, "max_source_length": 12, "max_target_length": 12, "val_max_target_length": 12, "test_max_target_length": 12, "fast_dev_run": False, "no_cache": False, "n_train": -1, "n_val": -1, "n_test": -1, "student_encoder_layers": 1, "alpha_loss_encoder": 0.0, "freeze_encoder": False, "auto_scale_batch_size": False, } def _dump_articles(path: Path, articles: list): with path.open("w") as f: f.write("\n".join(articles)) ARTICLES = [" Sam ate lunch today", "Sams lunch ingredients"] SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"] T5_TINY = "patrickvonplaten/t5-tiny-random" BART_TINY = "sshleifer/bart-tiny-random" MBART_TINY = "sshleifer/tiny-mbart" MARIAN_TINY = "sshleifer/tiny-marian-en-de" stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks def make_test_data_dir(**kwargs): tmp_dir = Path(tempfile.mkdtemp(**kwargs)) for split in ["train", "val", "test"]: _dump_articles((tmp_dir / f"{split}.source"), ARTICLES) _dump_articles((tmp_dir / f"{split}.target"), SUMMARIES) return tmp_dir class TestSummarizationDistiller(unittest.TestCase): @classmethod def setUpClass(cls): logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks return cls @require_multigpu def test_multigpu(self): updates = dict(no_teacher=True, freeze_encoder=True, gpus=2, sortish_sampler=False,) self._test_distiller_cli(updates) def test_distill_no_teacher(self): updates = dict(student_encoder_layers=2, student_decoder_layers=1, no_teacher=True) self._test_distiller_cli(updates) def test_distill_checkpointing_with_teacher(self): updates = dict( student_encoder_layers=2, student_decoder_layers=1, num_train_epochs=4, val_check_interval=0.25, alpha_hid=2.0, model_name_or_path="IGNORE_THIS_IT_DOESNT_GET_USED", ) model = self._test_distiller_cli(updates, check_contents=False) ckpts = list(Path(model.output_dir).glob("*.ckpt")) self.assertEqual(1, len(ckpts)) 
transformer_ckpts = list(Path(model.output_dir).glob("**/*.bin")) self.assertEqual(len(transformer_ckpts), 2) examples = lmap(str.strip, model.hparams.data_dir.joinpath("test.source").open().readlines()) out_path = tempfile.mktemp() generate_summaries_or_translations(examples, out_path, str(model.output_dir / "best_tfmr")) self.assertTrue(Path(out_path).exists()) evaluate_checkpoint(ckpts[0], dest_dir=Path(tempfile.mkdtemp())) @unittest.skip("T5 distillation is broken at the moment") def test_distill_t5(self): updates = dict( student_encoder_layers=1, student_decoder_layers=1, alpha_hid=2.0, teacher=T5_TINY, model_name_or_path=T5_TINY, tokenizer_name=T5_TINY, ) self._test_distiller_cli(updates) def _test_distiller_cli(self, updates, check_contents=True): default_updates = dict( train_batch_size=1, eval_batch_size=2, num_train_epochs=2, alpha_mlm=0.2, alpha_ce=0.8, do_predict=True, model_name_or_path="sshleifer/tinier_bart", teacher=CHEAP_ARGS["model_name_or_path"], val_check_interval=0.5, alpha_encoder_loss=0.4, ) default_updates.update(updates) args_d: dict = CHEAP_ARGS.copy() tmp_dir = make_test_data_dir() output_dir = tempfile.mkdtemp(prefix="output_") args_d.update(data_dir=tmp_dir, output_dir=output_dir, **default_updates) model = distill_main(argparse.Namespace(**args_d)) if not check_contents: return model contents = os.listdir(output_dir) ckpt_name = "val_avg_rouge2=0.0000-step_count=2.ckpt" # "val_avg_rouge2=0.0000-epoch=1.ckpt" # "epoch=1-val_avg_rouge2=0.0000.ckpt" contents = {os.path.basename(p) for p in contents} self.assertIn(ckpt_name, contents) self.assertIn("test_generations.txt", contents) self.assertIn("test_results.txt", contents) metrics = load_json(model.metrics_save_path) last_step_stats = metrics["val"][-1] self.assertGreaterEqual(last_step_stats["val_avg_gen_time"], 0.01) self.assertGreaterEqual(1.0, last_step_stats["val_avg_gen_time"]) self.assertIsInstance(last_step_stats[f"val_avg_{model.val_metric}"], float) desired_n_evals = int(args_d["num_train_epochs"] * (1 / args_d["val_check_interval"]) + 1) self.assertEqual(len(metrics["val"]), desired_n_evals) self.assertEqual(len(metrics["test"]), 1) return model @pytest.mark.parametrize(["model"], [pytest.param(T5_TINY), pytest.param(BART_TINY), pytest.param(MBART_TINY)]) def test_run_eval_bart(model): input_file_name = Path(tempfile.mkdtemp()) / "utest_input.source" output_file_name = input_file_name.parent / "utest_output.txt" assert not output_file_name.exists() articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."] _dump_articles(input_file_name, articles) testargs = ["run_eval.py", model, str(input_file_name), str(output_file_name)] # TODO: test score_path with patch.object(sys, "argv", testargs): run_generate() assert Path(output_file_name).exists() os.remove(Path(output_file_name)) @pytest.mark.parametrize( ["model"], [pytest.param(T5_TINY), pytest.param(BART_TINY), pytest.param(MBART_TINY), pytest.param(MARIAN_TINY)] ) def test_finetune(model): args_d: dict = CHEAP_ARGS.copy() task = "translation" if model in [MBART_TINY, MARIAN_TINY] else "summarization" tmp_dir = make_test_data_dir() output_dir = tempfile.mkdtemp(prefix="output_") args_d.update( data_dir=tmp_dir, model_name_or_path=model, tokenizer_name=None, train_batch_size=2, eval_batch_size=2, output_dir=output_dir, do_predict=True, task=task, ) assert "n_train" in args_d args = argparse.Namespace(**args_d) main(args) @pytest.mark.parametrize( ["tok"], [pytest.param(T5_TINY), pytest.param(BART_TINY), 
pytest.param(MBART_TINY), pytest.param(MARIAN_TINY)] ) def test_dataset(tok): tokenizer = AutoTokenizer.from_pretrained(tok) tmp_dir = make_test_data_dir() max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES) max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES) trunc_target = 4 train_dataset = SummarizationDataset( tokenizer, data_dir=tmp_dir, type_path="train", max_source_length=20, max_target_length=trunc_target, ) dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["decoder_input_ids"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated
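# --- Hedged usage sketch (not part of the original file) ---
# The same smoke-test pattern as test_finetune above, runnable outside pytest:
# build a tiny data dir, override a few CHEAP_ARGS entries, and call finetune's
# main. Assumes the names imported and defined earlier in this file are in scope.
import argparse
import tempfile

args_d = CHEAP_ARGS.copy()
args_d.update(
    data_dir=make_test_data_dir(),
    model_name_or_path=BART_TINY,
    tokenizer_name=None,
    output_dir=tempfile.mkdtemp(prefix="output_"),
    do_predict=False,
    task="summarization",
)
model = main(argparse.Namespace(**args_d))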
9,237
35.513834
141
py
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/seq2seq/callbacks.py
import logging import os from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only def count_trainable_parameters(model): model_parameters = filter(lambda p: p.requires_grad, model.parameters()) params = sum([np.prod(p.size()) for p in model_parameters]) return params logger = logging.getLogger(__name__) class Seq2SeqLoggingCallback(pl.Callback): @rank_zero_only def _write_logs( self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True ) -> None: logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****") metrics = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]}) # Log results od = Path(pl_module.hparams.output_dir) if type_path == "test": results_file = od / "test_results.txt" generations_file = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt" generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt" results_file.parent.mkdir(exist_ok=True) generations_file.parent.mkdir(exist_ok=True) with open(results_file, "a+") as writer: for key in sorted(metrics): if key in ["log", "progress_bar", "preds"]: continue val = metrics[key] if isinstance(val, torch.Tensor): val = val.item() msg = f"{key}: {val:.6f}\n" writer.write(msg) if not save_generations: return if "preds" in metrics: content = "\n".join(metrics["preds"]) generations_file.open("w+").write(content) @rank_zero_only def on_train_start(self, trainer, pl_module): try: npars = pl_module.model.model.num_parameters() except AttributeError: npars = pl_module.model.num_parameters() n_trainable_pars = count_trainable_parameters(pl_module) # mp stands for million parameters trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6}) @rank_zero_only def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): return self._write_logs(trainer, pl_module, "test") def get_checkpoint_callback(output_dir, metric): """Saves the best model by validation ROUGE2 score.""" if metric == "rouge2": exp = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": exp = "{val_avg_bleu:.4f}-{step_count}" else: raise NotImplementedError( f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this function." ) checkpoint_callback = ModelCheckpoint( filepath=os.path.join(output_dir, exp), monitor=f"val_{metric}", mode="max", save_top_k=1, period=0, # maybe save a checkpoint every time val is run, not just end of epoch. ) return checkpoint_callback
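# --- Hedged usage sketch (not part of the original file) ---
# How these callbacks are typically wired into a pytorch_lightning Trainer
# (finetune.py routes them through generic_train). The exact Trainer keyword
# arguments depend on the pytorch_lightning version these examples target, and
# the output dir is a placeholder.
import pytorch_lightning as pl

checkpoint_cb = get_checkpoint_callback(output_dir="outputs", metric="rouge2")
trainer = pl.Trainer(
    max_epochs=1,
    checkpoint_callback=checkpoint_cb,
    callbacks=[Seq2SeqLoggingCallback()],
)
# trainer.fit(model)  # `model` would be a SummarizationModule from finetune.py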
3,529
36.956989
126
py
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/seq2seq/run_eval.py
import argparse import json from pathlib import Path import torch from tqdm import tqdm from transformers import AutoModelForSeq2SeqLM, AutoTokenizer try: from .utils import calculate_rouge, use_task_specific_params, calculate_bleu_score, trim_batch except ImportError: from utils import calculate_rouge, use_task_specific_params, calculate_bleu_score, trim_batch DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu" def chunks(lst, n): """Yield successive n-sized chunks from lst.""" for i in range(0, len(lst), n): yield lst[i : i + n] def generate_summaries_or_translations( examples: list, out_file: str, model_name: str, batch_size: int = 8, device: str = DEFAULT_DEVICE, fp16=False, task="summarization", **gen_kwargs, ) -> None: fout = Path(out_file).open("w", encoding="utf-8") model_name = str(model_name) model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device) if fp16: model = model.half() tokenizer = AutoTokenizer.from_pretrained(model_name) # update config with summarization specific params use_task_specific_params(model, task) for batch in tqdm(list(chunks(examples, batch_size))): if "t5" in model_name: batch = [model.config.prefix + text for text in batch] batch = tokenizer(batch, max_length=1024, return_tensors="pt", truncation=True, padding="max_length").to( device ) input_ids, attention_mask = trim_batch(**batch, pad_token_id=tokenizer.pad_token_id) summaries = model.generate(input_ids=input_ids, attention_mask=attention_mask, **gen_kwargs) dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False) for hypothesis in dec: fout.write(hypothesis + "\n") fout.flush() def run_generate(): parser = argparse.ArgumentParser() parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.") parser.add_argument("input_path", type=str, help="like cnn_dm/test.source") parser.add_argument("save_path", type=str, help="where to save summaries") parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test_reference_summaries.txt") parser.add_argument("--score_path", type=str, required=False, help="where to save the rouge score in json format") parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.") parser.add_argument("--task", type=str, default="summarization", help="typically translation or summarization") parser.add_argument("--bs", type=int, default=8, required=False, help="batch size") parser.add_argument( "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all." ) parser.add_argument("--fp16", action="store_true") args = parser.parse_args() examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()] if args.n_obs > 0: examples = examples[: args.n_obs] generate_summaries_or_translations( examples, args.save_path, args.model_name, batch_size=args.bs, device=args.device, fp16=args.fp16, task=args.task, ) if args.reference_path is None: return # Compute scores score_fn = calculate_bleu_score if "translation" in args.task else calculate_rouge output_lns = [x.rstrip() for x in open(args.save_path).readlines()] reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)] scores: dict = score_fn(output_lns, reference_lns) if args.score_path is not None: json.dump(scores, open(args.score_path, "w+")) return scores if __name__ == "__main__": run_generate()
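# --- Hedged usage sketch (not part of the original file) ---
# Programmatic use of generate_summaries_or_translations as defined above; the
# checkpoint is a tiny public model and the output path a placeholder, both used
# only for illustration.
examples = ["PG&E stated it scheduled the blackouts in response to forecasts for high winds."]
generate_summaries_or_translations(
    examples,
    out_file="hypotheses.txt",
    model_name="sshleifer/bart-tiny-random",
    batch_size=1,
    device="cpu",
)
print(open("hypotheses.txt").read())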
3,919
37.058252
119
py
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/seq2seq/utils.py
import itertools import json import os import pickle from pathlib import Path from typing import Callable, Dict, Iterable, List import git import numpy as np import torch from rouge_score import rouge_scorer, scoring from sacrebleu import corpus_bleu from torch import nn from torch.utils.data import Dataset, Sampler from tqdm import tqdm def encode_file( tokenizer, data_path, max_length, pad_to_max_length=True, return_tensors="pt", overwrite_cache=False, prefix="", tok_name="", ): cache_path = Path(f"{data_path}_{tok_name}{max_length}.pt") if not overwrite_cache and cache_path.exists(): try: examples = torch.load(cache_path) assert isinstance(examples, list) return examples except Exception: print(f"failed to load from {cache_path}, retokenizing {data_path}") data_path = Path(data_path) lns = lmap(str.strip, data_path.open().readlines()) lns = [prefix + text for text in lns] assert lns, f"found empty file at {data_path}" examples = [] for text in tqdm(lns, desc=f"Tokenizing {data_path.name}"): tokenized = tokenizer( [text], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, add_prefix_space=True, return_tensors=return_tensors, ) assert tokenized.input_ids.shape[1] == max_length examples.append(tokenized) torch.save(lmap(dict, examples), cache_path.open("wb")) return examples def lmap(f: Callable, x: Iterable) -> List: """list(map(f, x))""" return list(map(f, x)) def calculate_bleu_score(output_lns, refs_lns, **kwargs) -> dict: """Uses sacrebleu's corpus_bleu implementation.""" return {"bleu": corpus_bleu(output_lns, [refs_lns], **kwargs).score} def trim_batch( input_ids, pad_token_id, attention_mask=None, ): """Remove columns that are populated exclusively by pad_token_id""" keep_column_mask = input_ids.ne(pad_token_id).any(dim=0) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class SummarizationDataset(Dataset): def __init__( self, tokenizer, data_dir, type_path="train", max_source_length=1024, max_target_length=56, n_obs=None, overwrite_cache=False, prefix="", ): super().__init__() tok_name = tokenizer.__class__.__name__.lower().rstrip("tokenizer") self.source = encode_file( tokenizer, os.path.join(data_dir, type_path + ".source"), max_source_length, overwrite_cache=overwrite_cache, prefix=prefix, tok_name=tok_name, ) tgt_path = os.path.join(data_dir, type_path + ".target") if hasattr(tokenizer, "set_lang"): tokenizer.set_lang("ro_RO") # HACK: only applies to mbart self.target = encode_file( tokenizer, tgt_path, max_target_length, overwrite_cache=overwrite_cache, tok_name=tok_name ) if n_obs is not None: self.source = self.source[:n_obs] self.target = self.target[:n_obs] self.pad_token_id = tokenizer.pad_token_id def __len__(self): return len(self.source) def __getitem__(self, index): source_ids = self.source[index]["input_ids"].squeeze() target_ids = self.target[index]["input_ids"].squeeze() src_mask = self.source[index]["attention_mask"].squeeze() return {"input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids} @staticmethod def trim_seq2seq_batch(batch, pad_token_id): y = trim_batch(batch["decoder_input_ids"], pad_token_id) source_ids, source_mask = trim_batch(batch["input_ids"], pad_token_id, attention_mask=batch["attention_mask"]) return source_ids, source_mask, y def collate_fn(self, batch) -> dict: input_ids = torch.stack([x["input_ids"] for x in batch]) masks = torch.stack([x["attention_mask"] for x in batch]) 
target_ids = torch.stack([x["decoder_input_ids"] for x in batch]) pad_token_id = self.pad_token_id y = trim_batch(target_ids, pad_token_id) source_ids, source_mask = trim_batch(input_ids, pad_token_id, attention_mask=masks) batch = {"input_ids": source_ids, "attention_mask": source_mask, "decoder_input_ids": y} return batch @property def src_lens(self): # Can delete? return lmap(len, self.source) @property def tgt_lens(self): return lmap(len, self.target) def make_sortish_sampler(self, batch_size): return SortishSampler(self.source, batch_size) class SortishSampler(Sampler): "Go through the text data by order of src length with a bit of randomness. From fastai repo." def __init__(self, data, batch_size): self.data, self.bs = data, batch_size def key(self, i): return len(self.data[i]) def __len__(self) -> int: return len(self.data) def __iter__(self): idxs = np.random.permutation(len(self.data)) sz = self.bs * 50 ck_idx = [idxs[i : i + sz] for i in range(0, len(idxs), sz)] sort_idx = np.concatenate([sorted(s, key=self.key, reverse=True) for s in ck_idx]) sz = self.bs ck_idx = [sort_idx[i : i + sz] for i in range(0, len(sort_idx), sz)] max_ck = np.argmax([self.key(ck[0]) for ck in ck_idx]) # find the chunk with the largest key, ck_idx[0], ck_idx[max_ck] = ck_idx[max_ck], ck_idx[0] # then make sure it goes first. sort_idx = np.concatenate(np.random.permutation(ck_idx[1:])) if len(ck_idx) > 1 else np.array([], dtype=np.int) sort_idx = np.concatenate((ck_idx[0], sort_idx)) return iter(sort_idx) def use_task_specific_params(model, task): # update config with summarization specific params task_specific_params = model.config.task_specific_params if task_specific_params is not None: model.config.update(task_specific_params.get(task, {})) def pickle_load(path): """pickle.load(path)""" with open(path, "rb") as f: return pickle.load(f) def pickle_save(obj, path): """pickle.dump(obj, path)""" with open(path, "wb") as f: return pickle.dump(obj, f) def flatten_list(summary_ids: List[List]): return [x for x in itertools.chain.from_iterable(summary_ids)] def save_git_info(folder_path: str) -> None: """Save git information to output_dir/git_log.json""" repo_infos = get_git_info() save_json(repo_infos, os.path.join(folder_path, "git_log.json")) def save_json(content, path): with open(path, "w") as f: json.dump(content, f, indent=4) def load_json(path): with open(path) as f: return json.load(f) def get_git_info(): repo = git.Repo(search_parent_directories=True) repo_infos = { "repo_id": str(repo), "repo_sha": str(repo.head.object.hexsha), "repo_branch": str(repo.active_branch), } return repo_infos ROUGE_KEYS = ["rouge1", "rouge2", "rougeL"] def calculate_rouge(output_lns: List[str], reference_lns: List[str]) -> Dict: scorer = rouge_scorer.RougeScorer(ROUGE_KEYS, use_stemmer=True) aggregator = scoring.BootstrapAggregator() for reference_ln, output_ln in zip(reference_lns, output_lns): scores = scorer.score(reference_ln, output_ln) aggregator.add_scores(scores) result = aggregator.aggregate() return {k: v.mid.fmeasure for k, v in result.items()} def freeze_params(model: nn.Module): for par in model.parameters(): par.requires_grad = False def grad_status(model: nn.Module) -> Iterable: return (par.requires_grad for par in model.parameters()) def any_requires_grad(model: nn.Module) -> bool: return any(grad_status(model)) def assert_all_frozen(model): model_grads: List[bool] = list(grad_status(model)) n_require_grad = sum(lmap(int, model_grads)) npars = len(model_grads) assert not any(model_grads), 
f"{n_require_grad/npars:.1%} of {npars} weights require grad" def assert_not_all_frozen(model): model_grads: List[bool] = list(grad_status(model)) npars = len(model_grads) assert any(model_grads), f"none of {npars} weights require grad"
8,503
31.334601
119
py
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/seq2seq/finetune.py
import argparse import glob import logging import os import time import warnings from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from torch.utils.data import DataLoader from lightning_base import BaseTransformer, add_generic_args, generic_train from transformers import get_linear_schedule_with_warmup try: from .utils import ( use_task_specific_params, SummarizationDataset, lmap, flatten_list, pickle_save, save_git_info, save_json, freeze_params, calculate_rouge, get_git_info, ROUGE_KEYS, calculate_bleu_score, ) from .callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback except ImportError: from utils import ( use_task_specific_params, SummarizationDataset, lmap, flatten_list, pickle_save, save_git_info, save_json, freeze_params, calculate_rouge, get_git_info, ROUGE_KEYS, calculate_bleu_score, ) from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback logger = logging.getLogger(__name__) class SummarizationModule(BaseTransformer): mode = "summarization" loss_names = ["loss"] metric_names = ROUGE_KEYS val_metric = "rouge2" def __init__(self, hparams, **kwargs): super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs) use_task_specific_params(self.model, "summarization") save_git_info(self.hparams.output_dir) self.metrics_save_path = Path(self.output_dir) / "metrics.json" self.hparams_save_path = Path(self.output_dir) / "hparams.pkl" pickle_save(self.hparams, self.hparams_save_path) self.step_count = 0 self.metrics = defaultdict(list) self.dataset_kwargs: dict = dict( data_dir=self.hparams.data_dir, max_source_length=self.hparams.max_source_length, prefix=self.model.config.prefix or "", ) n_observations_per_split = { "train": self.hparams.n_train, "val": self.hparams.n_val, "test": self.hparams.n_test, } self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} self.target_lens = { "train": self.hparams.max_target_length, "val": self.hparams.val_max_target_length, "test": self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}" assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}" if self.hparams.freeze_embeds: self.freeze_embeds() if self.hparams.freeze_encoder: freeze_params(self.model.model.encoder) # TODO: this will break for t5 self.hparams.git_sha = get_git_info()["repo_sha"] self.num_workers = hparams.num_workers def freeze_embeds(self): """Freeze token embeddings and positional embeddings for bart, just token embeddings for t5.""" try: freeze_params(self.model.model.shared) for d in [self.model.model.encoder, self.model.model.decoder]: freeze_params(d.embed_positions) freeze_params(d.embed_tokens) except AttributeError: freeze_params(self.model.shared) for d in [self.model.encoder, self.model.decoder]: freeze_params(d.embed_tokens) def forward(self, input_ids, **kwargs): return self.model(input_ids, **kwargs) def ids_to_clean_text(self, generated_ids: List[int]): gen_text = self.tokenizer.batch_decode( generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True ) return lmap(str.strip, gen_text) def _step(self, batch: dict) -> Tuple: pad_token_id = self.tokenizer.pad_token_id source_ids, source_mask, y = batch["input_ids"], batch["attention_mask"], batch["decoder_input_ids"] y_ids = y[:, :-1].contiguous() lm_labels = y[:, 1:].clone() lm_labels[y[:, 1:] == pad_token_id] = -100 
outputs = self(source_ids, attention_mask=source_mask, decoder_input_ids=y_ids, labels=lm_labels,) loss = outputs[0] return (loss,) def training_step(self, batch, batch_idx) -> Dict: loss_tensors = self._step(batch) logs = {name: loss for name, loss in zip(self.loss_names, loss_tensors)} return {"loss": loss_tensors[0], "log": logs} def validation_step(self, batch, batch_idx) -> Dict: return self._generative_step(batch) def validation_epoch_end(self, outputs, prefix="val") -> Dict: self.step_count += 1 losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names} loss = losses["loss"] rouges = {k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "summ_len"]} rouge_tensor: torch.FloatTensor = torch.tensor(rouges[self.val_metric]).type_as(loss) rouges.update({k: v.item() for k, v in losses.items()}) losses.update(rouges) metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()} metrics["step_count"] = self.step_count self.save_metrics(metrics, prefix) # writes to self.metrics_save_path preds = flatten_list([x["preds"] for x in outputs]) return {"log": metrics, "preds": preds, f"{prefix}_loss": loss, f"{prefix}_{self.val_metric}": rouge_tensor} def save_metrics(self, latest_metrics, type_path) -> None: self.metrics[type_path].append(latest_metrics) save_json(self.metrics, self.metrics_save_path) def calc_generative_metrics(self, preds, target) -> Dict: return calculate_rouge(preds, target) def _generative_step(self, batch: dict) -> dict: pad_token_id = self.tokenizer.pad_token_id source_ids, source_mask, y = SummarizationDataset.trim_seq2seq_batch(batch, pad_token_id) t0 = time.time() generated_ids = self.model.generate(input_ids=source_ids, attention_mask=source_mask, use_cache=True,) gen_time = (time.time() - t0) / source_ids.shape[0] preds = self.ids_to_clean_text(generated_ids) target = self.ids_to_clean_text(y) loss_tensors = self._step(batch) base_metrics = {name: loss for name, loss in zip(self.loss_names, loss_tensors)} rouge: Dict = self.calc_generative_metrics(preds, target) summ_len = np.mean(lmap(len, generated_ids)) base_metrics.update(gen_time=gen_time, summ_len=summ_len, preds=preds, target=target, **rouge) return base_metrics def test_step(self, batch, batch_idx): return self._generative_step(batch) def test_epoch_end(self, outputs): return self.validation_epoch_end(outputs, prefix="test") def get_dataset(self, type_path) -> SummarizationDataset: n_obs = self.n_obs[type_path] max_target_length = self.target_lens[type_path] dataset = SummarizationDataset( self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs, ) return dataset def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader: dataset = self.get_dataset(type_path) sampler = None if self.hparams.sortish_sampler and type_path == "train": assert self.hparams.gpus <= 1 # TODO: assert earlier sampler = dataset.make_sortish_sampler(batch_size) shuffle = False dataloader = DataLoader( dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=sampler, ) return dataloader def train_dataloader(self) -> DataLoader: dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True) t_total = ( (len(dataloader.dataset) // (self.hparams.train_batch_size * max(1, self.hparams.gpus))) // self.hparams.gradient_accumulation_steps * float(self.hparams.num_train_epochs) ) scheduler = 
get_linear_schedule_with_warmup( self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=t_total ) if max(scheduler.get_last_lr()) > 0: warnings.warn("All learning rates are 0") self.lr_scheduler = scheduler return dataloader def val_dataloader(self) -> DataLoader: return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size) def test_dataloader(self) -> DataLoader: return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size) @staticmethod def add_model_specific_args(parser, root_dir): BaseTransformer.add_model_specific_args(parser, root_dir) add_generic_args(parser, root_dir) parser.add_argument( "--max_source_length", default=1024, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument( "--max_target_length", default=56, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument( "--val_max_target_length", default=142, # these defaults are optimized for CNNDM. For xsum, see README.md. type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument( "--test_max_target_length", default=142, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument( "--data_dir", type=str, required=True, help="The input data dir. Should contain train.source, train.target, val.source, val.target, test.source, test.target", ) parser.add_argument("--freeze_encoder", action="store_true") parser.add_argument("--freeze_embeds", action="store_true") parser.add_argument("--sortish_sampler", action="store_true", default=False) parser.add_argument("--logger", type=str, choices=["default", "wandb", "wandb_shared"], default="default") parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.") parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.") parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.") parser.add_argument( "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all." 
) return parser class TranslationModule(SummarizationModule): mode = "translation" loss_names = ["loss"] metric_names = ["bleu"] val_metric = "bleu" def calc_generative_metrics(self, preds, target) -> dict: return calculate_bleu_score(preds, target) def main(args, model=None) -> SummarizationModule: Path(args.output_dir).mkdir(exist_ok=True) if len(os.listdir(args.output_dir)) > 3 and args.do_train: raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir)) if model is None: if args.task == "summarization": model: SummarizationModule = SummarizationModule(args) else: model: SummarizationModule = TranslationModule(args) if ( args.logger == "default" or args.fast_dev_run or str(args.output_dir).startswith("/tmp") or str(args.output_dir).startswith("/var") ): logger = True # don't pollute wandb logs unnecessarily elif args.logger == "wandb": from pytorch_lightning.loggers import WandbLogger logger = WandbLogger(name=model.output_dir.name) elif args.logger == "wandb_shared": from pytorch_lightning.loggers import WandbLogger logger = WandbLogger(name=model.output_dir.name) trainer: pl.Trainer = generic_train( model, args, logging_callback=Seq2SeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(args.output_dir, model.val_metric), logger=logger, # TODO: early stopping callback seems messed up ) pickle_save(model.hparams, model.output_dir / "hparams.pkl") if not args.do_predict: return model model.hparams.test_checkpoint = "" checkpoints = list(sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))) if checkpoints: model.hparams.test_checkpoint = checkpoints[-1] trainer.resume_from_checkpoint = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams) trainer.test(model) # this breaks in DDP, known lightning issue. See evaluate_checkpoint to recover metrics. return model if __name__ == "__main__": parser = argparse.ArgumentParser() parser = SummarizationModule.add_model_specific_args(parser, os.getcwd()) args = parser.parse_args() main(args)
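# --- Hedged usage sketch (not part of the original file) ---
# One possible command-line invocation of this script; the paths and checkpoint
# are placeholders, flags not defined in this file come from lightning_base's
# generic/base arguments, and data_dir must contain {train,val,test}.{source,target}
# files as described by the --data_dir help string above.
#
#   python finetune.py \
#       --data_dir cnn_dm \
#       --model_name_or_path sshleifer/bart-tiny-random \
#       --output_dir bart_cnn_output \
#       --do_train --do_predict \
#       --train_batch_size 2 --eval_batch_size 2 \
#       --task summarization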
13,985
39.53913
131
py
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/seq2seq/initialization_utils.py
from typing import List from torch import nn def init_student(student, teacher): teacher_state_dict = teacher.state_dict() info = student.load_state_dict(teacher_state_dict, strict=False) assert info.missing_keys == [], info.missing_keys return student, info def copy_decoder_layers(teacher, student, l2copy=[0, 2, 4, 7, 9, 11]): copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, l2copy) def copy_layers(teacher_layers: nn.ModuleList, student_layers: nn.ModuleList, layers_to_copy: List) -> None: layers_to_copy = nn.ModuleList([l for i, l in enumerate(teacher_layers) if i in layers_to_copy]) assert len(student_layers) == len(layers_to_copy), f"{len(student_layers)} != {len(layers_to_copy)}" student_layers.load_state_dict(layers_to_copy.state_dict())
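# --- Hedged usage sketch (not part of the original file) ---
# Building a shallower student from a teacher with the helpers above, mirroring
# what distillation.py does. The checkpoint is a tiny public model (2 encoder and
# 2 decoder layers) used only for illustration.
from transformers import BartConfig, BartForConditionalGeneration

teacher = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random")
cfg = teacher.config.to_diff_dict()
cfg.update(decoder_layers=1)  # keep a single decoder layer in the student
student = BartForConditionalGeneration(BartConfig(**cfg))

student, info = init_student(student, teacher)  # non-strict copy of matching weights
copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, layers_to_copy=[0])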
816
37.904762
108
py
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/seq2seq/distillation.py
import argparse import gc import os from pathlib import Path from typing import List import pytorch_lightning as pl import torch from torch import nn from torch.nn import functional as F from lightning_base import generic_train from transformers import AdamW, BartConfig, BartForConditionalGeneration, T5Config, T5ForConditionalGeneration try: from .finetune import SummarizationModule from .initialization_utils import init_student, copy_layers from .utils import ( use_task_specific_params, SummarizationDataset, pickle_load, freeze_params, assert_all_frozen, any_requires_grad, ) from .finetune import main as ft_main except ImportError: from finetune import SummarizationModule from finetune import main as ft_main from initialization_utils import init_student, copy_layers from utils import ( use_task_specific_params, SummarizationDataset, pickle_load, freeze_params, assert_all_frozen, any_requires_grad, ) class BartSummarizationDistiller(SummarizationModule): loss_names = ["loss", "ce_loss", "mlm_loss", "enc_mse_loss", "hid_loss_enc", "hid_loss_dec"] def __init__(self, hparams): assert Path(hparams.data_dir).exists() student, student_cfg, teacher = self.pre_init(hparams) super().__init__(hparams, model=student, config=student_cfg) self.teacher = teacher use_task_specific_params(self.teacher, "summarization") freeze_params(self.teacher) self.sanity_check_gradients() self.ce_loss_fct = nn.KLDivLoss(reduction="batchmean") self.temperature = 2.0 self.alpha_mlm = hparams.alpha_mlm self.alpha_ce = hparams.alpha_ce self.alpha_hid = hparams.alpha_hid # self.alpha_cos = hparams.alpha_cos self.alpha_encoder_loss = self.hparams.alpha_encoder_loss gc.collect() torch.cuda.empty_cache() def sanity_check_gradients(self): assert_all_frozen(self.teacher) assert_all_frozen(self.model.model.decoder.embed_tokens) assert_all_frozen(self.model.model.encoder.embed_tokens) if self.different_encoder: assert any_requires_grad(self.model.model.encoder) else: freeze_params(self.model.model.encoder) del self.teacher.model.encoder def pre_init(self, hparams): self.output_dir = Path(hparams.output_dir) self.output_dir.mkdir(exist_ok=True) teacher = BartForConditionalGeneration.from_pretrained(hparams.teacher).eval() student_updates = { "decoder_layers": hparams.student_decoder_layers, "encoder_layers": hparams.student_encoder_layers, } if hparams.length_penalty != -1: student_updates["length_penalty"] = hparams.length_penalty d_layers_to_copy = get_layers_to_copy(student_updates["decoder_layers"], teacher.config.decoder_layers) e_layers_to_copy: List = get_layers_to_copy(student_updates["encoder_layers"], teacher.config.encoder_layers) hparams.d_layer_to_copy = d_layers_to_copy hparams.e_layer_to_copy = e_layers_to_copy kw = teacher.config.to_diff_dict() kw.update(student_updates) # Copy weights student_cfg = BartConfig(**kw) student = BartForConditionalGeneration(student_cfg) student, _ = init_student(student, teacher) save_dir = self.output_dir.joinpath("student") self.copy_to_student(d_layers_to_copy, e_layers_to_copy, hparams, student, teacher) student.save_pretrained(save_dir) hparams.model_name_or_path = str(save_dir) return student, student_cfg, teacher def copy_to_student(self, d_layers_to_copy, e_layers_to_copy, hparams, student, teacher): if teacher.config.model_type == "t5": return self.copy_t5_to_student(d_layers_to_copy, e_layers_to_copy, hparams, student, teacher) self.different_encoder: bool = hparams.student_encoder_layers != teacher.config.encoder_layers self.different_decoder = 
hparams.student_decoder_layers != teacher.config.decoder_layers if self.different_decoder: copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy) if self.different_encoder: copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy) def copy_t5_to_student(self, d_layers_to_copy, e_layers_to_copy, hparams, student, teacher): self.different_encoder: bool = hparams.student_encoder_layers != teacher.config.num_layers self.different_decoder = hparams.student_decoder_layers != teacher.config.num_layers if self.different_decoder: copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy) if self.different_encoder: copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy) def get_dataset(self, type_path) -> SummarizationDataset: n_obs = self.n_obs[type_path] dataset = SummarizationDataset(self.tokenizer, type_path=type_path, n_obs=n_obs, **self.dataset_kwargs) return dataset def calc_mse_loss(self, teacher_outputs: torch.Tensor, student_outputs: torch.Tensor, mask) -> torch.FloatTensor: if mask is not None: # mask has False at padding_idx sel_mask = mask[:, :, None].expand_as(student_outputs).bool() s_logits_slct = torch.masked_select(student_outputs, sel_mask) t_logits_slct = torch.masked_select(teacher_outputs, sel_mask) else: t_logits_slct = teacher_outputs s_logits_slct = student_outputs return F.mse_loss(s_logits_slct, t_logits_slct) def calc_ce_loss(self, mask, s_logits, t_logits): if mask is not None: # mask has False at padding_idx sel_mask = mask[:, :, None].expand_as(s_logits) s_logits_slct = torch.masked_select( s_logits, sel_mask ) # (bs * seq_length * voc_size) modulo the 1s in mask t_logits_slct = torch.masked_select( t_logits, sel_mask ) # (bs * seq_length * voc_size) modulo the 1s in mask else: t_logits_slct = t_logits s_logits_slct = s_logits # (bs * seq_length * voc_size) modulo the 1s in mask s_logits_slct = s_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask t_logits_slct = t_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask assert t_logits_slct.size() == s_logits_slct.size() loss_ce = ( self.ce_loss_fct( F.log_softmax(s_logits_slct / self.temperature, dim=-1), F.softmax(t_logits_slct / self.temperature, dim=-1), ) * (self.temperature) ** 2 ) return loss_ce, s_logits_slct, t_logits_slct def configure_optimizers(self): "Prepare optimizer and schedule (linear warmup and decay)" model = self.model no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": self.hparams.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon) self.opt = optimizer return [optimizer] @staticmethod def add_model_specific_args(parser, root_dir): SummarizationModule.add_model_specific_args(parser, root_dir) parser.add_argument("--teacher", default="facebook/bart-large-cnn", type=str) parser.add_argument("--alpha_ce", default=0.8, type=float) parser.add_argument("--alpha_mlm", default=0.2, type=float) # parser.add_argument("--alpha_cos", default=0.0, type=float) parser.add_argument("--alpha_encoder_loss", default=0.0, type=float) parser.add_argument("--alpha_hid", default=0.0, type=float, required=False) 
parser.add_argument("--student_decoder_layers", default=12, type=int, required=False) parser.add_argument("--student_encoder_layers", default=12, type=int, required=False) parser.add_argument("--no_teacher", action="store_true", default=False) parser.add_argument("--length_penalty", type=float, default=-1) return parser def _step(self, batch): # assert is_frozen(self.teacher) pad_token_id = self.tokenizer.pad_token_id input_ids, src_mask, y = batch["input_ids"], batch["attention_mask"], batch["decoder_input_ids"] decoder_input_ids = y[:, :-1].contiguous() labels = y[:, 1:].clone() labels[y[:, 1:] == pad_token_id] = -100 # noinspection PyCallingNonCallable sloss, slogits, dec_hidden, enc_outputs, enc_hidden_state = self( input_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, labels=labels, output_hidden_states=True, output_attentions=False, ) def zero_tensor(): return torch.tensor(0.0).type_as(sloss) loss_encoder, hid_loss_enc, hid_loss_dec = zero_tensor(), zero_tensor(), zero_tensor() if self.different_encoder: with torch.no_grad(): teacher_enc_outputs, teacher_enc_hid, _ = self.teacher.model.encoder( input_ids, attention_mask=src_mask, output_hidden_states=True ) if self.hparams.alpha_encoder_loss > 0: loss_encoder = self.calc_mse_loss(enc_outputs, teacher_enc_outputs, src_mask) hid_loss_enc = self.calc_hidden_loss( src_mask, enc_hidden_state, teacher_enc_hid, self.hparams.e_layer_to_copy ) teacher_enc_outputs = (enc_outputs,) assert isinstance(teacher_enc_outputs, tuple), type(teacher_enc_outputs) with torch.no_grad(): tloss, tlogits, tdec_hidden, _ = self.teacher( input_ids, attention_mask=src_mask, encoder_outputs=teacher_enc_outputs, decoder_input_ids=decoder_input_ids, lm_labels=labels, output_hidden_states=True, ) dec_mask = decoder_input_ids.ne(pad_token_id) loss_ce, s_logits_slct, t_logits_slct = self.calc_ce_loss(dec_mask, slogits, tlogits) if self.alpha_hid > 0: hid_loss_dec = self.calc_hidden_loss(dec_mask, dec_hidden, tdec_hidden, self.hparams.d_layer_to_copy) blended_loss = ( self.alpha_ce * loss_ce + self.alpha_mlm * sloss + self.hparams.alpha_encoder_loss * loss_encoder + self.hparams.alpha_hid * (hid_loss_enc + hid_loss_dec) ) return blended_loss, loss_ce, sloss, loss_encoder, hid_loss_enc, hid_loss_dec def calc_hidden_loss(self, attention_mask, hidden_states, hidden_states_T, matches): assert not isinstance( hidden_states, torch.Tensor ), f"expected list or tuple for hidden_states, got tensor of shape {hidden_states.shape}" assert not isinstance( hidden_states_T, torch.Tensor ), f"expected list or tuple for hidden_states_T, got tensor of shape {hidden_states_T.shape}" mask = attention_mask.to(hidden_states[0]) valid_count = mask.sum() * hidden_states[0].size(-1) hidden_losses = [ (F.mse_loss(hidden_states[i], hidden_states_T[j], reduction="none") * mask.unsqueeze(-1)).sum() / valid_count for i, j in enumerate(matches) ] return sum(hidden_losses) class T5SummarizationDistiller(BartSummarizationDistiller): def pre_init(self, hparams): raise NotImplementedError("T5 Distillation does not work yet") self.output_dir = Path(hparams.output_dir) self.output_dir.mkdir(exist_ok=True) teacher = T5ForConditionalGeneration.from_pretrained(hparams.teacher) n_layer = hparams.student_decoder_layers assert n_layer == hparams.student_encoder_layers # TODO(SS): relax this constraint so that we can do 12-6. 
d_layers_to_copy = get_layers_to_copy(n_layer, len(teacher.decoder.block)) e_layers_to_copy: List = get_layers_to_copy(n_layer, len(teacher.encoder.block)) student_updates = {"num_layers": n_layer} hparams.d_layer_to_copy = d_layers_to_copy hparams.e_layer_to_copy = e_layers_to_copy kw = teacher.config.to_diff_dict() kw.update(student_updates) # Copy weights student_cfg = T5Config(**kw) student = T5ForConditionalGeneration(student_cfg) student, _ = init_student(student, teacher) self.copy_to_student(d_layers_to_copy, e_layers_to_copy, hparams, student, teacher) Path(hparams.output_dir).mkdir(exist_ok=True) task_specific_params = student.config.task_specific_params if task_specific_params is not None: student.config.update(task_specific_params.get("summarization", {})) # TODO: dont hardcode save_dir = self.output_dir.joinpath("student") save_dir.mkdir(exist_ok=True) student.save_pretrained(save_dir) hparams.model_name_or_path = str(save_dir) return student, student_cfg, teacher def freeze_embeds(self): freeze_params(self.model.shared) for d in [self.model.encoder, self.model.decoder]: freeze_params(d.embed_tokens) def sanity_check_gradients(self): """T5""" assert_all_frozen(self.teacher) assert_all_frozen(self.model.decoder.embed_tokens) assert_all_frozen(self.model.encoder.embed_tokens) if self.different_encoder: assert any_requires_grad(self.model.encoder) else: freeze_params(self.model.encoder) del self.teacher.model.encoder if self.different_decoder: assert any_requires_grad(self.model.decoder) else: freeze_params(self.model.decoder) # TODO(SS): very suspicious def _step(self, batch): pad_token_id = self.tokenizer.pad_token_id source_ids, source_mask, y = batch["input_ids"], batch["attention_mask"], batch["decoder_input_ids"] decoder_input_ids = y[:, :-1].contiguous() labels = y[:, 1:].clone() labels[y[:, 1:] == pad_token_id] = -100 # noinspection PyCallingNonCallable dec_mask = decoder_input_ids.ne(pad_token_id) sloss, slogits, dec_hidden, enc_outputs, enc_hidden_state = self( source_ids, attention_mask=source_mask, decoder_input_ids=decoder_input_ids, labels=labels, output_hidden_states=True, output_attentions=False, use_cache=False, ) def zero_tensor(): return torch.tensor(0.0).type_as(sloss) loss_encoder, hid_loss_enc, hid_loss_dec = zero_tensor(), zero_tensor(), zero_tensor() if self.different_encoder: with torch.no_grad(): teacher_enc_outputs, teacher_enc_hid = self.teacher.encoder( source_ids, attention_mask=source_mask, output_hidden_states=True, use_cache=False, ) if self.hparams.alpha_encoder_loss > 0: loss_encoder = self.calc_mse_loss(enc_outputs, teacher_enc_outputs, source_mask) hid_loss_enc = self.calc_hidden_loss( source_mask, enc_hidden_state, teacher_enc_hid, self.hparams.e_layer_to_copy ) teacher_enc_outputs = (enc_outputs,) assert isinstance(teacher_enc_outputs, tuple), type(teacher_enc_outputs) with torch.no_grad(): tloss, tlogits, tdec_hidden, _ = self.teacher( source_ids, attention_mask=source_mask, encoder_outputs=teacher_enc_outputs, decoder_input_ids=decoder_input_ids, lm_labels=labels, output_hidden_states=True, use_cache=False, ) loss_ce, s_logits_slct, t_logits_slct = self.calc_ce_loss(dec_mask, slogits, tlogits) if self.alpha_hid > 0: hid_loss_dec = self.calc_hidden_loss(dec_mask, dec_hidden, tdec_hidden, self.hparams.d_layer_to_copy) blended_loss = ( self.alpha_ce * loss_ce + self.alpha_mlm * sloss + self.hparams.alpha_encoder_loss * loss_encoder + self.hparams.alpha_hid * (hid_loss_enc + hid_loss_dec) ) return blended_loss, loss_ce, sloss, loss_encoder, 
hid_loss_enc, hid_loss_dec def create_module(args): t5 = "t5" in args.model_name_or_path if args.no_teacher: assert not args.enc_only module_cls = SummarizationModule elif t5: module_cls = T5SummarizationDistiller elif args.enc_only: raise ValueError("Deleted that") else: module_cls = BartSummarizationDistiller args.setup_cls: str = module_cls.__name__ model = module_cls(args) return model def evaluate_checkpoint(ckpt_path: Path, dest_dir=None): exp_dir = ckpt_path.parent if dest_dir is None: dest_dir = exp_dir clash = list(dest_dir.glob("test_generations*")) if clash: print(f"SKIPPING to avoid overwriting {clash}") ckpt = torch.load(ckpt_path, map_location="cpu") if "hparams" in ckpt: args = argparse.Namespace(**ckpt["hparams"]) else: args = argparse.Namespace(**pickle_load(exp_dir / "hparams.pkl")) args.resume_from_checkpoint = str(ckpt_path) args.do_train = False args.output_dir = str(dest_dir) args.n_gpu = 1 args.eval_batch_size = 16 Path(args.output_dir).mkdir(exist_ok=True) model = create_module(args) trainer: pl.Trainer = generic_train(model, args, early_stopping_callback=False) trainer.test(model) def get_layers_to_copy(n_to_get, tot): all_layers = list(range(tot)) if tot == 12: # Alternating for special cases layers_to_copy = { # maps num layers in student -> which teacher layers to copy 1: [0], 2: [0, 6], 3: [0, 6, 11], 4: [0, 4, 8, 11], 6: [0, 2, 4, 7, 9, 11], 9: [0, 1, 2, 4, 5, 7, 9, 10, 11], 12: all_layers, } return layers_to_copy[n_to_get] else: return all_layers[:n_to_get] # TODO: better version on theseus-bart branch def distill_main(args): Path(args.output_dir).mkdir(exist_ok=True) if len(os.listdir(args.output_dir)) > 3 and args.do_train: raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir)) model = create_module(args) return ft_main(args, model=model) if __name__ == "__main__": parser = argparse.ArgumentParser() parser = BartSummarizationDistiller.add_model_specific_args(parser, os.getcwd()) args = parser.parse_args() distill_main(args)
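# --- Illustrative usage sketch (not part of the original file) ---
# get_layers_to_copy (defined above) decides which teacher layers a shallower
# student re-uses: a hand-picked spread when the teacher has 12 layers,
# otherwise simply the first n layers. The expected values below follow
# directly from the hard-coded mapping and assume the function is in scope.
assert get_layers_to_copy(3, 12) == [0, 6, 11]
assert get_layers_to_copy(6, 12) == [0, 2, 4, 7, 9, 11]
assert get_layers_to_copy(4, 16) == [0, 1, 2, 3]  # non-12-layer teacher: first n layers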
19,485
42.015453
118
py
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/seq2seq/bertabs/modeling_bertabs.py
# MIT License # Copyright (c) 2019 Yang Liu and the HuggingFace team # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import copy import math import numpy as np import torch from torch import nn from torch.nn.init import xavier_uniform_ from configuration_bertabs import BertAbsConfig from transformers import BertConfig, BertModel, PreTrainedModel MAX_SIZE = 5000 BERTABS_FINETUNED_MODEL_ARCHIVE_LIST = [ "remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization", ] class BertAbsPreTrainedModel(PreTrainedModel): config_class = BertAbsConfig load_tf_weights = False base_model_prefix = "bert" class BertAbs(BertAbsPreTrainedModel): def __init__(self, args, checkpoint=None, bert_extractive_checkpoint=None): super().__init__(args) self.args = args self.bert = Bert() # If pre-trained weights are passed for Bert, load these. 
load_bert_pretrained_extractive = True if bert_extractive_checkpoint else False if load_bert_pretrained_extractive: self.bert.model.load_state_dict( dict([(n[11:], p) for n, p in bert_extractive_checkpoint.items() if n.startswith("bert.model")]), strict=True, ) self.vocab_size = self.bert.model.config.vocab_size if args.max_pos > 512: my_pos_embeddings = nn.Embedding(args.max_pos, self.bert.model.config.hidden_size) my_pos_embeddings.weight.data[:512] = self.bert.model.embeddings.position_embeddings.weight.data my_pos_embeddings.weight.data[512:] = self.bert.model.embeddings.position_embeddings.weight.data[-1][ None, : ].repeat(args.max_pos - 512, 1) self.bert.model.embeddings.position_embeddings = my_pos_embeddings tgt_embeddings = nn.Embedding(self.vocab_size, self.bert.model.config.hidden_size, padding_idx=0) tgt_embeddings.weight = copy.deepcopy(self.bert.model.embeddings.word_embeddings.weight) self.decoder = TransformerDecoder( self.args.dec_layers, self.args.dec_hidden_size, heads=self.args.dec_heads, d_ff=self.args.dec_ff_size, dropout=self.args.dec_dropout, embeddings=tgt_embeddings, vocab_size=self.vocab_size, ) gen_func = nn.LogSoftmax(dim=-1) self.generator = nn.Sequential(nn.Linear(args.dec_hidden_size, args.vocab_size), gen_func) self.generator[0].weight = self.decoder.embeddings.weight load_from_checkpoints = False if checkpoint is None else True if load_from_checkpoints: self.load_state_dict(checkpoint) def init_weights(self): for module in self.decoder.modules(): if isinstance(module, (nn.Linear, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=0.02) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() for p in self.generator.parameters(): if p.dim() > 1: xavier_uniform_(p) else: p.data.zero_() def forward( self, encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask, ): encoder_output = self.bert( input_ids=encoder_input_ids, token_type_ids=token_type_ids, attention_mask=encoder_attention_mask, ) encoder_hidden_states = encoder_output[0] dec_state = self.decoder.init_decoder_state(encoder_input_ids, encoder_hidden_states) decoder_outputs, _ = self.decoder(decoder_input_ids[:, :-1], encoder_hidden_states, dec_state) return decoder_outputs class Bert(nn.Module): """ This class is not really necessary and should probably disappear. """ def __init__(self): super().__init__() config = BertConfig.from_pretrained("bert-base-uncased") self.model = BertModel(config) def forward(self, input_ids, attention_mask=None, token_type_ids=None, **kwargs): self.eval() with torch.no_grad(): encoder_outputs, _ = self.model( input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, **kwargs ) return encoder_outputs class TransformerDecoder(nn.Module): """ The Transformer decoder from "Attention is All You Need". Args: num_layers (int): number of encoder layers. d_model (int): size of the model heads (int): number of heads d_ff (int): size of the inner FF layer dropout (float): dropout parameters embeddings (:obj:`onmt.modules.Embeddings`): embeddings to use, should have positional encodings attn_type (str): if using a seperate copy attention """ def __init__(self, num_layers, d_model, heads, d_ff, dropout, embeddings, vocab_size): super().__init__() # Basic attributes. 
self.decoder_type = "transformer" self.num_layers = num_layers self.embeddings = embeddings self.pos_emb = PositionalEncoding(dropout, self.embeddings.embedding_dim) # Build TransformerDecoder. self.transformer_layers = nn.ModuleList( [TransformerDecoderLayer(d_model, heads, d_ff, dropout) for _ in range(num_layers)] ) self.layer_norm = nn.LayerNorm(d_model, eps=1e-6) # forward(input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask) # def forward(self, input_ids, state, attention_mask=None, memory_lengths=None, # step=None, cache=None, encoder_attention_mask=None, encoder_hidden_states=None, memory_masks=None): def forward( self, input_ids, encoder_hidden_states=None, state=None, attention_mask=None, memory_lengths=None, step=None, cache=None, encoder_attention_mask=None, ): """ See :obj:`onmt.modules.RNNDecoderBase.forward()` memory_bank = encoder_hidden_states """ # Name conversion tgt = input_ids memory_bank = encoder_hidden_states memory_mask = encoder_attention_mask # src_words = state.src src_words = state.src src_batch, src_len = src_words.size() padding_idx = self.embeddings.padding_idx # Decoder padding mask tgt_words = tgt tgt_batch, tgt_len = tgt_words.size() tgt_pad_mask = tgt_words.data.eq(padding_idx).unsqueeze(1).expand(tgt_batch, tgt_len, tgt_len) # Encoder padding mask if memory_mask is not None: src_len = memory_mask.size(-1) src_pad_mask = memory_mask.expand(src_batch, tgt_len, src_len) else: src_pad_mask = src_words.data.eq(padding_idx).unsqueeze(1).expand(src_batch, tgt_len, src_len) # Pass through the embeddings emb = self.embeddings(input_ids) output = self.pos_emb(emb, step) assert emb.dim() == 3 # len x batch x embedding_dim if state.cache is None: saved_inputs = [] for i in range(self.num_layers): prev_layer_input = None if state.cache is None: if state.previous_input is not None: prev_layer_input = state.previous_layer_inputs[i] output, all_input = self.transformer_layers[i]( output, memory_bank, src_pad_mask, tgt_pad_mask, previous_input=prev_layer_input, layer_cache=state.cache["layer_{}".format(i)] if state.cache is not None else None, step=step, ) if state.cache is None: saved_inputs.append(all_input) if state.cache is None: saved_inputs = torch.stack(saved_inputs) output = self.layer_norm(output) if state.cache is None: state = state.update_state(tgt, saved_inputs) # Decoders in transformers return a tuple. Beam search will fail # if we don't follow this convention. 
return output, state # , state def init_decoder_state(self, src, memory_bank, with_cache=False): """ Init decoder state """ state = TransformerDecoderState(src) if with_cache: state._init_cache(memory_bank, self.num_layers) return state class PositionalEncoding(nn.Module): def __init__(self, dropout, dim, max_len=5000): pe = torch.zeros(max_len, dim) position = torch.arange(0, max_len).unsqueeze(1) div_term = torch.exp((torch.arange(0, dim, 2, dtype=torch.float) * -(math.log(10000.0) / dim))) pe[:, 0::2] = torch.sin(position.float() * div_term) pe[:, 1::2] = torch.cos(position.float() * div_term) pe = pe.unsqueeze(0) super().__init__() self.register_buffer("pe", pe) self.dropout = nn.Dropout(p=dropout) self.dim = dim def forward(self, emb, step=None): emb = emb * math.sqrt(self.dim) if step: emb = emb + self.pe[:, step][:, None, :] else: emb = emb + self.pe[:, : emb.size(1)] emb = self.dropout(emb) return emb def get_emb(self, emb): return self.pe[:, : emb.size(1)] class TransformerDecoderLayer(nn.Module): """ Args: d_model (int): the dimension of keys/values/queries in MultiHeadedAttention, also the input size of the first-layer of the PositionwiseFeedForward. heads (int): the number of heads for MultiHeadedAttention. d_ff (int): the second-layer of the PositionwiseFeedForward. dropout (float): dropout probability(0-1.0). self_attn_type (string): type of self-attention scaled-dot, average """ def __init__(self, d_model, heads, d_ff, dropout): super().__init__() self.self_attn = MultiHeadedAttention(heads, d_model, dropout=dropout) self.context_attn = MultiHeadedAttention(heads, d_model, dropout=dropout) self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout) self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-6) self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-6) self.drop = nn.Dropout(dropout) mask = self._get_attn_subsequent_mask(MAX_SIZE) # Register self.mask as a saved_state in TransformerDecoderLayer, so # it gets TransformerDecoderLayer's cuda behavior automatically. self.register_buffer("mask", mask) def forward( self, inputs, memory_bank, src_pad_mask, tgt_pad_mask, previous_input=None, layer_cache=None, step=None, ): """ Args: inputs (`FloatTensor`): `[batch_size x 1 x model_dim]` memory_bank (`FloatTensor`): `[batch_size x src_len x model_dim]` src_pad_mask (`LongTensor`): `[batch_size x 1 x src_len]` tgt_pad_mask (`LongTensor`): `[batch_size x 1 x 1]` Returns: (`FloatTensor`, `FloatTensor`, `FloatTensor`): * output `[batch_size x 1 x model_dim]` * attn `[batch_size x 1 x src_len]` * all_input `[batch_size x current_step x model_dim]` """ dec_mask = torch.gt(tgt_pad_mask + self.mask[:, : tgt_pad_mask.size(1), : tgt_pad_mask.size(1)], 0) input_norm = self.layer_norm_1(inputs) all_input = input_norm if previous_input is not None: all_input = torch.cat((previous_input, input_norm), dim=1) dec_mask = None query = self.self_attn(all_input, all_input, input_norm, mask=dec_mask, layer_cache=layer_cache, type="self",) query = self.drop(query) + inputs query_norm = self.layer_norm_2(query) mid = self.context_attn( memory_bank, memory_bank, query_norm, mask=src_pad_mask, layer_cache=layer_cache, type="context", ) output = self.feed_forward(self.drop(mid) + query) return output, all_input # return output def _get_attn_subsequent_mask(self, size): """ Get an attention mask to avoid using the subsequent info. 
Args: size: int Returns: (`LongTensor`): * subsequent_mask `[1 x size x size]` """ attn_shape = (1, size, size) subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype("uint8") subsequent_mask = torch.from_numpy(subsequent_mask) return subsequent_mask class MultiHeadedAttention(nn.Module): """ Multi-Head Attention module from "Attention is All You Need" :cite:`DBLP:journals/corr/VaswaniSPUJGKP17`. Similar to standard `dot` attention but uses multiple attention distributions simulataneously to select relevant items. .. mermaid:: graph BT A[key] B[value] C[query] O[output] subgraph Attn D[Attn 1] E[Attn 2] F[Attn N] end A --> D C --> D A --> E C --> E A --> F C --> F D --> O E --> O F --> O B --> O Also includes several additional tricks. Args: head_count (int): number of parallel heads model_dim (int): the dimension of keys/values/queries, must be divisible by head_count dropout (float): dropout parameter """ def __init__(self, head_count, model_dim, dropout=0.1, use_final_linear=True): assert model_dim % head_count == 0 self.dim_per_head = model_dim // head_count self.model_dim = model_dim super().__init__() self.head_count = head_count self.linear_keys = nn.Linear(model_dim, head_count * self.dim_per_head) self.linear_values = nn.Linear(model_dim, head_count * self.dim_per_head) self.linear_query = nn.Linear(model_dim, head_count * self.dim_per_head) self.softmax = nn.Softmax(dim=-1) self.dropout = nn.Dropout(dropout) self.use_final_linear = use_final_linear if self.use_final_linear: self.final_linear = nn.Linear(model_dim, model_dim) def forward( self, key, value, query, mask=None, layer_cache=None, type=None, predefined_graph_1=None, ): """ Compute the context vector and the attention vectors. Args: key (`FloatTensor`): set of `key_len` key vectors `[batch, key_len, dim]` value (`FloatTensor`): set of `key_len` value vectors `[batch, key_len, dim]` query (`FloatTensor`): set of `query_len` query vectors `[batch, query_len, dim]` mask: binary mask indicating which keys have non-zero attention `[batch, query_len, key_len]` Returns: (`FloatTensor`, `FloatTensor`) : * output context vectors `[batch, query_len, dim]` * one of the attention vectors `[batch, query_len, key_len]` """ batch_size = key.size(0) dim_per_head = self.dim_per_head head_count = self.head_count def shape(x): """ projection """ return x.view(batch_size, -1, head_count, dim_per_head).transpose(1, 2) def unshape(x): """ compute context """ return x.transpose(1, 2).contiguous().view(batch_size, -1, head_count * dim_per_head) # 1) Project key, value, and query. 
if layer_cache is not None: if type == "self": query, key, value = ( self.linear_query(query), self.linear_keys(query), self.linear_values(query), ) key = shape(key) value = shape(value) if layer_cache is not None: device = key.device if layer_cache["self_keys"] is not None: key = torch.cat((layer_cache["self_keys"].to(device), key), dim=2) if layer_cache["self_values"] is not None: value = torch.cat((layer_cache["self_values"].to(device), value), dim=2) layer_cache["self_keys"] = key layer_cache["self_values"] = value elif type == "context": query = self.linear_query(query) if layer_cache is not None: if layer_cache["memory_keys"] is None: key, value = self.linear_keys(key), self.linear_values(value) key = shape(key) value = shape(value) else: key, value = ( layer_cache["memory_keys"], layer_cache["memory_values"], ) layer_cache["memory_keys"] = key layer_cache["memory_values"] = value else: key, value = self.linear_keys(key), self.linear_values(value) key = shape(key) value = shape(value) else: key = self.linear_keys(key) value = self.linear_values(value) query = self.linear_query(query) key = shape(key) value = shape(value) query = shape(query) # 2) Calculate and scale scores. query = query / math.sqrt(dim_per_head) scores = torch.matmul(query, key.transpose(2, 3)) if mask is not None: mask = mask.unsqueeze(1).expand_as(scores) scores = scores.masked_fill(mask, -1e18) # 3) Apply attention dropout and compute context vectors. attn = self.softmax(scores) if predefined_graph_1 is not None: attn_masked = attn[:, -1] * predefined_graph_1 attn_masked = attn_masked / (torch.sum(attn_masked, 2).unsqueeze(2) + 1e-9) attn = torch.cat([attn[:, :-1], attn_masked.unsqueeze(1)], 1) drop_attn = self.dropout(attn) if self.use_final_linear: context = unshape(torch.matmul(drop_attn, value)) output = self.final_linear(context) return output else: context = torch.matmul(drop_attn, value) return context class DecoderState(object): """Interface for grouping together the current state of a recurrent decoder. In the simplest case just represents the hidden state of the model. But can also be used for implementing various forms of input_feeding and non-recurrent models. Modules need to implement this to utilize beam search decoding. """ def detach(self): """ Need to document this """ self.hidden = tuple([_.detach() for _ in self.hidden]) self.input_feed = self.input_feed.detach() def beam_update(self, idx, positions, beam_size): """ Need to document this """ for e in self._all: sizes = e.size() br = sizes[1] if len(sizes) == 3: sent_states = e.view(sizes[0], beam_size, br // beam_size, sizes[2])[:, :, idx] else: sent_states = e.view(sizes[0], beam_size, br // beam_size, sizes[2], sizes[3])[:, :, idx] sent_states.data.copy_(sent_states.data.index_select(1, positions)) def map_batch_fn(self, fn): raise NotImplementedError() class TransformerDecoderState(DecoderState): """ Transformer Decoder state base class """ def __init__(self, src): """ Args: src (FloatTensor): a sequence of source words tensors with optional feature tensors, of size (len x batch). """ self.src = src self.previous_input = None self.previous_layer_inputs = None self.cache = None @property def _all(self): """ Contains attributes that need to be updated in self.beam_update(). 
""" if self.previous_input is not None and self.previous_layer_inputs is not None: return (self.previous_input, self.previous_layer_inputs, self.src) else: return (self.src,) def detach(self): if self.previous_input is not None: self.previous_input = self.previous_input.detach() if self.previous_layer_inputs is not None: self.previous_layer_inputs = self.previous_layer_inputs.detach() self.src = self.src.detach() def update_state(self, new_input, previous_layer_inputs): state = TransformerDecoderState(self.src) state.previous_input = new_input state.previous_layer_inputs = previous_layer_inputs return state def _init_cache(self, memory_bank, num_layers): self.cache = {} for l in range(num_layers): layer_cache = {"memory_keys": None, "memory_values": None} layer_cache["self_keys"] = None layer_cache["self_values"] = None self.cache["layer_{}".format(l)] = layer_cache def repeat_beam_size_times(self, beam_size): """ Repeat beam_size times along batch dimension. """ self.src = self.src.data.repeat(1, beam_size, 1) def map_batch_fn(self, fn): def _recursive_map(struct, batch_dim=0): for k, v in struct.items(): if v is not None: if isinstance(v, dict): _recursive_map(v) else: struct[k] = fn(v, batch_dim) self.src = fn(self.src, 0) if self.cache is not None: _recursive_map(self.cache) def gelu(x): return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) class PositionwiseFeedForward(nn.Module): """ A two-layer Feed-Forward-Network with residual layer norm. Args: d_model (int): the size of input for the first-layer of the FFN. d_ff (int): the hidden layer size of the second-layer of the FNN. dropout (float): dropout probability in :math:`[0, 1)`. """ def __init__(self, d_model, d_ff, dropout=0.1): super().__init__() self.w_1 = nn.Linear(d_model, d_ff) self.w_2 = nn.Linear(d_ff, d_model) self.layer_norm = nn.LayerNorm(d_model, eps=1e-6) self.actv = gelu self.dropout_1 = nn.Dropout(dropout) self.dropout_2 = nn.Dropout(dropout) def forward(self, x): inter = self.dropout_1(self.actv(self.w_1(self.layer_norm(x)))) output = self.dropout_2(self.w_2(inter)) return output + x # # TRANSLATOR # The following code is used to generate summaries using the # pre-trained weights and beam search. # def build_predictor(args, tokenizer, symbols, model, logger=None): # we should be able to refactor the global scorer a lot scorer = GNMTGlobalScorer(args.alpha, length_penalty="wu") translator = Translator(args, model, tokenizer, symbols, global_scorer=scorer, logger=logger) return translator class GNMTGlobalScorer(object): """ NMT re-ranking score from "Google's Neural Machine Translation System" :cite:`wu2016google` Args: alpha (float): length parameter beta (float): coverage parameter """ def __init__(self, alpha, length_penalty): self.alpha = alpha penalty_builder = PenaltyBuilder(length_penalty) self.length_penalty = penalty_builder.length_penalty() def score(self, beam, logprobs): """ Rescores a prediction based on penalty functions """ normalized_probs = self.length_penalty(beam, logprobs, self.alpha) return normalized_probs class PenaltyBuilder(object): """ Returns the Length and Coverage Penalty function for Beam Search. 
Args: length_pen (str): option name of length pen cov_pen (str): option name of cov pen """ def __init__(self, length_pen): self.length_pen = length_pen def length_penalty(self): if self.length_pen == "wu": return self.length_wu elif self.length_pen == "avg": return self.length_average else: return self.length_none """ Below are all the different penalty terms implemented so far """ def length_wu(self, beam, logprobs, alpha=0.0): """ NMT length re-ranking score from "Google's Neural Machine Translation System" :cite:`wu2016google`. """ modifier = ((5 + len(beam.next_ys)) ** alpha) / ((5 + 1) ** alpha) return logprobs / modifier def length_average(self, beam, logprobs, alpha=0.0): """ Returns the average probability of tokens in a sequence. """ return logprobs / len(beam.next_ys) def length_none(self, beam, logprobs, alpha=0.0, beta=0.0): """ Returns unmodified scores. """ return logprobs class Translator(object): """ Uses a model to translate a batch of sentences. Args: model (:obj:`onmt.modules.NMTModel`): NMT model to use for translation fields (dict of Fields): data fields beam_size (int): size of beam to use n_best (int): number of translations produced max_length (int): maximum length output to produce global_scores (:obj:`GlobalScorer`): object to rescore final translations copy_attn (bool): use copy attention during translation beam_trace (bool): trace beam search for debugging logger(logging.Logger): logger. """ def __init__(self, args, model, vocab, symbols, global_scorer=None, logger=None): self.logger = logger self.args = args self.model = model self.generator = self.model.generator self.vocab = vocab self.symbols = symbols self.start_token = symbols["BOS"] self.end_token = symbols["EOS"] self.global_scorer = global_scorer self.beam_size = args.beam_size self.min_length = args.min_length self.max_length = args.max_length def translate(self, batch, step, attn_debug=False): """ Generates summaries from one batch of data. """ self.model.eval() with torch.no_grad(): batch_data = self.translate_batch(batch) translations = self.from_batch(batch_data) return translations def translate_batch(self, batch, fast=False): """ Translate a batch of sentences. Mostly a wrapper around :obj:`Beam`. Args: batch (:obj:`Batch`): a batch from a dataset object data (:obj:`Dataset`): the dataset object fast (bool): enables fast beam search (may not support all features) Todo: Shouldn't need the original dataset. """ with torch.no_grad(): return self._fast_translate_batch(batch, self.max_length, min_length=self.min_length) # Where the beam search lives # I have no idea why it is being called from the method above def _fast_translate_batch(self, batch, max_length, min_length=0): """ Beam Search using the encoder inputs contained in `batch`. """ # The batch object is funny # Instead of just looking at the size of the arguments we encapsulate # a size argument. # Where is it defined? beam_size = self.beam_size batch_size = batch.batch_size src = batch.src segs = batch.segs mask_src = batch.mask_src src_features = self.model.bert(src, segs, mask_src) dec_states = self.model.decoder.init_decoder_state(src, src_features, with_cache=True) device = src_features.device # Tile states and memory beam_size times. 
dec_states.map_batch_fn(lambda state, dim: tile(state, beam_size, dim=dim)) src_features = tile(src_features, beam_size, dim=0) batch_offset = torch.arange(batch_size, dtype=torch.long, device=device) beam_offset = torch.arange(0, batch_size * beam_size, step=beam_size, dtype=torch.long, device=device) alive_seq = torch.full([batch_size * beam_size, 1], self.start_token, dtype=torch.long, device=device) # Give full probability to the first beam on the first step. topk_log_probs = torch.tensor([0.0] + [float("-inf")] * (beam_size - 1), device=device).repeat(batch_size) # Structure that holds finished hypotheses. hypotheses = [[] for _ in range(batch_size)] # noqa: F812 results = {} results["predictions"] = [[] for _ in range(batch_size)] # noqa: F812 results["scores"] = [[] for _ in range(batch_size)] # noqa: F812 results["gold_score"] = [0] * batch_size results["batch"] = batch for step in range(max_length): decoder_input = alive_seq[:, -1].view(1, -1) # Decoder forward. decoder_input = decoder_input.transpose(0, 1) dec_out, dec_states = self.model.decoder(decoder_input, src_features, dec_states, step=step) # Generator forward. log_probs = self.generator(dec_out.transpose(0, 1).squeeze(0)) vocab_size = log_probs.size(-1) if step < min_length: log_probs[:, self.end_token] = -1e20 # Multiply probs by the beam probability. log_probs += topk_log_probs.view(-1).unsqueeze(1) alpha = self.global_scorer.alpha length_penalty = ((5.0 + (step + 1)) / 6.0) ** alpha # Flatten probs into a list of possibilities. curr_scores = log_probs / length_penalty if self.args.block_trigram: cur_len = alive_seq.size(1) if cur_len > 3: for i in range(alive_seq.size(0)): fail = False words = [int(w) for w in alive_seq[i]] words = [self.vocab.ids_to_tokens[w] for w in words] words = " ".join(words).replace(" ##", "").split() if len(words) <= 3: continue trigrams = [(words[i - 1], words[i], words[i + 1]) for i in range(1, len(words) - 1)] trigram = tuple(trigrams[-1]) if trigram in trigrams[:-1]: fail = True if fail: curr_scores[i] = -10e20 curr_scores = curr_scores.reshape(-1, beam_size * vocab_size) topk_scores, topk_ids = curr_scores.topk(beam_size, dim=-1) # Recover log probs. topk_log_probs = topk_scores * length_penalty # Resolve beam origin and true word ids. topk_beam_index = topk_ids.div(vocab_size) topk_ids = topk_ids.fmod(vocab_size) # Map beam_index to batch_index in the flat representation. batch_index = topk_beam_index + beam_offset[: topk_beam_index.size(0)].unsqueeze(1) select_indices = batch_index.view(-1) # Append last prediction. alive_seq = torch.cat([alive_seq.index_select(0, select_indices), topk_ids.view(-1, 1)], -1) is_finished = topk_ids.eq(self.end_token) if step + 1 == max_length: is_finished.fill_(1) # End condition is top beam is finished. end_condition = is_finished[:, 0].eq(1) # Save finished hypotheses. if is_finished.any(): predictions = alive_seq.view(-1, beam_size, alive_seq.size(-1)) for i in range(is_finished.size(0)): b = batch_offset[i] if end_condition[i]: is_finished[i].fill_(1) finished_hyp = is_finished[i].nonzero().view(-1) # Store finished hypotheses for this batch. for j in finished_hyp: hypotheses[b].append((topk_scores[i, j], predictions[i, j, 1:])) # If the batch reached the end, save the n_best hypotheses. 
if end_condition[i]: best_hyp = sorted(hypotheses[b], key=lambda x: x[0], reverse=True) score, pred = best_hyp[0] results["scores"][b].append(score) results["predictions"][b].append(pred) non_finished = end_condition.eq(0).nonzero().view(-1) # If all sentences are translated, no need to go further. if len(non_finished) == 0: break # Remove finished batches for the next step. topk_log_probs = topk_log_probs.index_select(0, non_finished) batch_index = batch_index.index_select(0, non_finished) batch_offset = batch_offset.index_select(0, non_finished) alive_seq = predictions.index_select(0, non_finished).view(-1, alive_seq.size(-1)) # Reorder states. select_indices = batch_index.view(-1) src_features = src_features.index_select(0, select_indices) dec_states.map_batch_fn(lambda state, dim: state.index_select(dim, select_indices)) return results def from_batch(self, translation_batch): batch = translation_batch["batch"] assert len(translation_batch["gold_score"]) == len(translation_batch["predictions"]) batch_size = batch.batch_size preds, _, _, tgt_str, src = ( translation_batch["predictions"], translation_batch["scores"], translation_batch["gold_score"], batch.tgt_str, batch.src, ) translations = [] for b in range(batch_size): pred_sents = self.vocab.convert_ids_to_tokens([int(n) for n in preds[b][0]]) pred_sents = " ".join(pred_sents).replace(" ##", "") gold_sent = " ".join(tgt_str[b].split()) raw_src = [self.vocab.ids_to_tokens[int(t)] for t in src[b]][:500] raw_src = " ".join(raw_src) translation = (pred_sents, gold_sent, raw_src) translations.append(translation) return translations def tile(x, count, dim=0): """ Tiles x on dimension dim count times. """ perm = list(range(len(x.size()))) if dim != 0: perm[0], perm[dim] = perm[dim], perm[0] x = x.permute(perm).contiguous() out_size = list(x.size()) out_size[0] *= count batch = x.size(0) x = x.view(batch, -1).transpose(0, 1).repeat(count, 1).transpose(0, 1).contiguous().view(*out_size) if dim != 0: x = x.permute(perm).contiguous() return x # # Optimizer for training. We keep this here in case we want to add # a finetuning script. # class BertSumOptimizer(object): """ Specific optimizer for BertSum. As described in [1], the authors fine-tune BertSum for abstractive summarization using two Adam Optimizers with different warm-up steps and learning rate. They also use a custom learning rate scheduler. [1] Liu, Yang, and Mirella Lapata. "Text summarization with pretrained encoders." arXiv preprint arXiv:1908.08345 (2019). """ def __init__(self, model, lr, warmup_steps, beta_1=0.99, beta_2=0.999, eps=1e-8): self.encoder = model.encoder self.decoder = model.decoder self.lr = lr self.warmup_steps = warmup_steps self.optimizers = { "encoder": torch.optim.Adam( model.encoder.parameters(), lr=lr["encoder"], betas=(beta_1, beta_2), eps=eps, ), "decoder": torch.optim.Adam( model.decoder.parameters(), lr=lr["decoder"], betas=(beta_1, beta_2), eps=eps, ), } self._step = 0 self.current_learning_rates = {} def _update_rate(self, stack): return self.lr[stack] * min(self._step ** (-0.5), self._step * self.warmup_steps[stack] ** (-1.5)) def zero_grad(self): self.optimizer_decoder.zero_grad() self.optimizer_encoder.zero_grad() def step(self): self._step += 1 for stack, optimizer in self.optimizers.items(): new_rate = self._update_rate(stack) for param_group in optimizer.param_groups: param_group["lr"] = new_rate optimizer.step() self.current_learning_rates[stack] = new_rate
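# --- Illustrative sketch (not part of the original file) ---
# BertSumOptimizer._update_rate above implements a Noam-style schedule:
# linear warmup followed by inverse-square-root decay, with separate base
# learning rates and warmup lengths for encoder and decoder. A standalone
# reproduction of that formula (the example values below are hypothetical):
def _bertsum_lr(step: int, base_lr: float, warmup_steps: int) -> float:
    # same expression as _update_rate: lr * min(step^-0.5, step * warmup^-1.5)
    return base_lr * min(step ** (-0.5), step * warmup_steps ** (-1.5))

# The rate rises until step == warmup_steps, peaks there, then decays, e.g.:
# [_bertsum_lr(s, 2e-3, 10_000) for s in (100, 5_000, 10_000, 40_000)]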
38,009
36.010711
118
py
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/seq2seq/bertabs/convert_bertabs_original_pytorch_checkpoint.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Convert BertExtAbs's checkpoints. The script looks like it is doing something trivial but it is not. The "weights" proposed by the authors are actually the entire model pickled. We need to load the model within the original codebase to be able to only save its `state_dict`. """ import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) SAMPLE_TEXT = "Hello world! cécé herlolip" BertAbsConfig = namedtuple( "BertAbsConfig", [ "temp_dir", "large", "use_bert_emb", "finetune_bert", "encoder", "share_emb", "max_pos", "enc_layers", "enc_hidden_size", "enc_heads", "enc_ff_size", "enc_dropout", "dec_layers", "dec_hidden_size", "dec_heads", "dec_ff_size", "dec_dropout", ], ) def convert_bertabs_checkpoints(path_to_checkpoints, dump_path): """ Copy/paste and tweak the pre-trained weights provided by the creators of BertAbs for the internal architecture. """ # Instantiate the authors' model with the pre-trained weights config = BertAbsConfig( temp_dir=".", finetune_bert=False, large=False, share_emb=True, use_bert_emb=False, encoder="bert", max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, ) checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage) original = AbsSummarizer(config, torch.device("cpu"), checkpoints) original.eval() new_model = BertAbsSummarizer(config, torch.device("cpu")) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info("convert the model") new_model.bert.load_state_dict(original.bert.state_dict()) new_model.decoder.load_state_dict(original.decoder.state_dict()) new_model.generator.load_state_dict(original.generator.state_dict()) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info("Make sure that the models' outputs are identical") tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") # prepare the model inputs encoder_input_ids = tokenizer.encode("This is sample éàalj'-.") encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids))) encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0) decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.") decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids))) decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0) # failsafe to make sure the weights reset does not affect the # loaded weights. 
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0 # forward pass src = encoder_input_ids tgt = decoder_input_ids segs = token_type_ids = None clss = None mask_src = encoder_attention_mask = None mask_tgt = decoder_attention_mask = None mask_cls = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. # We make sure that the outputs of the full stack are identical output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0] output_original_generator = original.generator(output_original_model) output_converted_model = new_model( encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask )[0] output_converted_generator = new_model.generator(output_converted_model) maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item() print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference)) maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item() print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference)) are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3) if are_identical: logging.info("all weights are equal up to 1e-3") else: raise ValueError("the weights are different. The new model is likely different from the original one.") # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info("saving the model's state dictionary") torch.save( new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--bertabs_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model.", ) args = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
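# --- Illustrative sketch (not part of the original file) ---
# The script above validates the weight conversion by running both models on
# the same inputs and comparing outputs element-wise (max absolute difference,
# then torch.allclose with atol=1e-3). The same check in isolation, with `a`
# and `b` standing in for the original and converted outputs (hypothetical):
import torch

def outputs_match(a: torch.Tensor, b: torch.Tensor, atol: float = 1e-3) -> bool:
    # report the largest element-wise deviation, then apply the tolerance test
    max_abs_diff = torch.max(torch.abs(a - b)).item()
    print("Maximum absolute difference: {:.2e}".format(max_abs_diff))
    return torch.allclose(a, b, atol=atol)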
6,452
35.457627
118
py
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/seq2seq/bertabs/utils_summarization.py
import os from collections import deque import torch from torch.utils.data import Dataset # ------------ # Data loading # ------------ class CNNDMDataset(Dataset): """ Abstracts the dataset used to train seq2seq models. The class will process the documents that are located in the specified folder. The preprocessing will work on any document that is reasonably formatted. On the CNN/DailyMail dataset it will extract both the story and the summary. CNN/Daily News: The CNN/Daily News raw datasets are downloaded from [1]. The stories are stored in different files; the summary appears at the end of the story as sentences that are prefixed by the special `@highlight` line. To process the data, untar both datasets in the same folder, and pass the path to this folder as the "data_dir argument. The formatting code was inspired by [2]. [1] https://cs.nyu.edu/~kcho/ [2] https://github.com/abisee/cnn-dailymail/ """ def __init__(self, path="", prefix="train"): """ We initialize the class by listing all the documents to summarize. Files are not read in memory due to the size of some datasets (like CNN/DailyMail). """ assert os.path.isdir(path) self.documents = [] story_filenames_list = os.listdir(path) for story_filename in story_filenames_list: if "summary" in story_filename: continue path_to_story = os.path.join(path, story_filename) if not os.path.isfile(path_to_story): continue self.documents.append(path_to_story) def __len__(self): """ Returns the number of documents. """ return len(self.documents) def __getitem__(self, idx): document_path = self.documents[idx] document_name = document_path.split("/")[-1] with open(document_path, encoding="utf-8") as source: raw_story = source.read() story_lines, summary_lines = process_story(raw_story) return document_name, story_lines, summary_lines def process_story(raw_story): """ Extract the story and summary from a story file. Arguments: raw_story (str): content of the story file as an utf-8 encoded string. Raises: IndexError: If the story is empty or contains no highlights. """ nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")])) # for some unknown reason some lines miss a period, add it nonempty_lines = [_add_missing_period(line) for line in nonempty_lines] # gather article lines story_lines = [] lines = deque(nonempty_lines) while True: try: element = lines.popleft() if element.startswith("@highlight"): break story_lines.append(element) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None, raising an exception. return story_lines, [] # gather summary lines summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines)) return story_lines, summary_lines def _add_missing_period(line): END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"] if line.startswith("@highlight"): return line if line[-1] in END_TOKENS: return line return line + "." # -------------------------- # Encoding and preprocessing # -------------------------- def truncate_or_pad(sequence, block_size, pad_token_id): """ Adapt the source and target sequences' lengths to the block size. If the sequence is shorter we append padding token to the right of the sequence. """ if len(sequence) > block_size: return sequence[:block_size] else: sequence.extend([pad_token_id] * (block_size - len(sequence))) return sequence def build_mask(sequence, pad_token_id): """ Builds the mask. The attention mechanism will only attend to positions with value 1. 
""" mask = torch.ones_like(sequence) idx_pad_tokens = sequence == pad_token_id mask[idx_pad_tokens] = 0 return mask def encode_for_summarization(story_lines, summary_lines, tokenizer): """ Encode the story and summary lines, and join them as specified in [1] by using `[SEP] [CLS]` tokens to separate sentences. """ story_lines_token_ids = [tokenizer.encode(line) for line in story_lines] story_token_ids = [token for sentence in story_lines_token_ids for token in sentence] summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines] summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence] return story_token_ids, summary_token_ids def compute_token_type_ids(batch, separator_token_id): """ Segment embeddings as described in [1] The values {0,1} were found in the repository [2]. Attributes: batch: torch.Tensor, size [batch_size, block_size] Batch of input. separator_token_id: int The value of the token that separates the segments. [1] Liu, Yang, and Mirella Lapata. "Text summarization with pretrained encoders." arXiv preprint arXiv:1908.08345 (2019). [2] https://github.com/nlpyang/PreSumm (/src/prepro/data_builder.py, commit fac1217) """ batch_embeddings = [] for sequence in batch: sentence_num = -1 embeddings = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2) batch_embeddings.append(embeddings) return torch.tensor(batch_embeddings)
5,763
33.309524
106
py
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/seq2seq/bertabs/test_utils_summarization.py
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class SummarizationDataProcessingTest(unittest.TestCase): def setUp(self): self.block_size = 10 def test_fit_to_block_sequence_too_small(self): """ Pad the sequence with 0 if the sequence is smaller than the block size.""" sequence = [1, 2, 3, 4] expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output) def test_fit_to_block_sequence_fit_exactly(self): """ Do nothing if the sequence is the right size. """ sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output) def test_fit_to_block_sequence_too_big(self): """ Truncate the sequence if it is too long. """ sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output) def test_process_story_no_highlights(self): """ Processing a story with no highlights returns an empty list for the summary. """ raw_story = """It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.""" _, summary_lines = process_story(raw_story) self.assertEqual(summary_lines, []) def test_process_empty_story(self): """ An empty story returns an empty collection of lines. 
""" raw_story = "" story_lines, summary_lines = process_story(raw_story) self.assertEqual(story_lines, []) self.assertEqual(summary_lines, []) def test_process_story_with_missing_period(self): raw_story = ( "It was the year of Our Lord one thousand seven hundred and " "seventy-five\n\nSpiritual revelations were conceded to England " "at that favoured period, as at this.\n@highlight\n\nIt was the best of times" ) story_lines, summary_lines = process_story(raw_story) expected_story_lines = [ "It was the year of Our Lord one thousand seven hundred and seventy-five.", "Spiritual revelations were conceded to England at that favoured period, as at this.", ] self.assertEqual(expected_story_lines, story_lines) expected_summary_lines = ["It was the best of times."] self.assertEqual(expected_summary_lines, summary_lines) def test_build_mask_no_padding(self): sequence = torch.tensor([1, 2, 3, 4]) expected = torch.tensor([1, 1, 1, 1]) np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy()) def test_build_mask(self): sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23]) expected = torch.tensor([1, 1, 1, 1, 0, 0, 0]) np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy()) def test_build_mask_with_padding_equal_to_one(self): sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1]) expected = torch.tensor([1, 1, 1, 1, 0, 0, 0]) np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy()) def test_compute_token_type_ids(self): separator = 101 batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]]) expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]]) result = compute_token_type_ids(batch, separator) np.testing.assert_array_equal(result, expected)
4,444
43.009901
99
py
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/seq2seq/bertabs/run_summarization.py
#! /usr/bin/python3 import argparse import logging import os import sys from collections import namedtuple import torch from torch.utils.data import DataLoader, SequentialSampler from tqdm import tqdm from modeling_bertabs import BertAbs, build_predictor from transformers import BertTokenizer from .utils_summarization import ( CNNDMDataset, build_mask, compute_token_type_ids, encode_for_summarization, truncate_or_pad, ) logger = logging.getLogger(__name__) logging.basicConfig(stream=sys.stdout, level=logging.INFO) Batch = namedtuple("Batch", ["document_names", "batch_size", "src", "segs", "mask_src", "tgt_str"]) def evaluate(args): tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", do_lower_case=True) model = BertAbs.from_pretrained("remi/bertabs-finetuned-extractive-abstractive-summarization") model.to(args.device) model.eval() symbols = { "BOS": tokenizer.vocab["[unused0]"], "EOS": tokenizer.vocab["[unused1]"], "PAD": tokenizer.vocab["[PAD]"], } if args.compute_rouge: reference_summaries = [] generated_summaries = [] import rouge import nltk nltk.download("punkt") rouge_evaluator = rouge.Rouge( metrics=["rouge-n", "rouge-l"], max_n=2, limit_length=True, length_limit=args.beam_size, length_limit_type="words", apply_avg=True, apply_best=False, alpha=0.5, # Default F1_score weight_factor=1.2, stemming=True, ) # these (unused) arguments are defined to keep the compatibility # with the legacy code and will be deleted in a next iteration. args.result_path = "" args.temp_dir = "" data_iterator = build_data_iterator(args, tokenizer) predictor = build_predictor(args, tokenizer, symbols, model) logger.info("***** Running evaluation *****") logger.info(" Number examples = %d", len(data_iterator.dataset)) logger.info(" Batch size = %d", args.batch_size) logger.info("") logger.info("***** Beam Search parameters *****") logger.info(" Beam size = %d", args.beam_size) logger.info(" Minimum length = %d", args.min_length) logger.info(" Maximum length = %d", args.max_length) logger.info(" Alpha (length penalty) = %.2f", args.alpha) logger.info(" Trigrams %s be blocked", ("will" if args.block_trigram else "will NOT")) for batch in tqdm(data_iterator): batch_data = predictor.translate_batch(batch) translations = predictor.from_batch(batch_data) summaries = [format_summary(t) for t in translations] save_summaries(summaries, args.summaries_output_dir, batch.document_names) if args.compute_rouge: reference_summaries += batch.tgt_str generated_summaries += summaries if args.compute_rouge: scores = rouge_evaluator.get_scores(generated_summaries, reference_summaries) str_scores = format_rouge_scores(scores) save_rouge_scores(str_scores) print(str_scores) def save_summaries(summaries, path, original_document_name): """ Write the summaries in fies that are prefixed by the original files' name with the `_summary` appended. Attributes: original_document_names: List[string] Name of the document that was summarized. path: string Path were the summaries will be written summaries: List[string] The summaries that we produced. """ for summary, document_name in zip(summaries, original_document_name): # Prepare the summary file's name if "." in document_name: bare_document_name = ".".join(document_name.split(".")[:-1]) extension = document_name.split(".")[-1] name = bare_document_name + "_summary." 
+ extension else: name = document_name + "_summary" file_path = os.path.join(path, name) with open(file_path, "w") as output: output.write(summary) def format_summary(translation): """ Transforms the output of the `from_batch` function into nicely formatted summaries. """ raw_summary, _, _ = translation summary = ( raw_summary.replace("[unused0]", "") .replace("[unused3]", "") .replace("[PAD]", "") .replace("[unused1]", "") .replace(r" +", " ") .replace(" [unused2] ", ". ") .replace("[unused2]", "") .strip() ) return summary def format_rouge_scores(scores): return """\n ****** ROUGE SCORES ****** ** ROUGE 1 F1 >> {:.3f} Precision >> {:.3f} Recall >> {:.3f} ** ROUGE 2 F1 >> {:.3f} Precision >> {:.3f} Recall >> {:.3f} ** ROUGE L F1 >> {:.3f} Precision >> {:.3f} Recall >> {:.3f}""".format( scores["rouge-1"]["f"], scores["rouge-1"]["p"], scores["rouge-1"]["r"], scores["rouge-2"]["f"], scores["rouge-2"]["p"], scores["rouge-2"]["r"], scores["rouge-l"]["f"], scores["rouge-l"]["p"], scores["rouge-l"]["r"], ) def save_rouge_scores(str_scores): with open("rouge_scores.txt", "w") as output: output.write(str_scores) # # LOAD the dataset # def build_data_iterator(args, tokenizer): dataset = load_and_cache_examples(args, tokenizer) sampler = SequentialSampler(dataset) def collate_fn(data): return collate(data, tokenizer, block_size=512, device=args.device) iterator = DataLoader(dataset, sampler=sampler, batch_size=args.batch_size, collate_fn=collate_fn,) return iterator def load_and_cache_examples(args, tokenizer): dataset = CNNDMDataset(args.documents_dir) return dataset def collate(data, tokenizer, block_size, device): """ Collate formats the data passed to the data loader. In particular we tokenize the data batch after batch to avoid keeping them all in memory. We output the data as a namedtuple to fit the original BertAbs's API. """ data = [x for x in data if not len(x[1]) == 0] # remove empty_files names = [name for name, _, _ in data] summaries = [" ".join(summary_list) for _, _, summary_list in data] encoded_text = [encode_for_summarization(story, summary, tokenizer) for _, story, summary in data] encoded_stories = torch.tensor( [truncate_or_pad(story, block_size, tokenizer.pad_token_id) for story, _ in encoded_text] ) encoder_token_type_ids = compute_token_type_ids(encoded_stories, tokenizer.cls_token_id) encoder_mask = build_mask(encoded_stories, tokenizer.pad_token_id) batch = Batch( document_names=names, batch_size=len(encoded_stories), src=encoded_stories.to(device), segs=encoder_token_type_ids.to(device), mask_src=encoder_mask.to(device), tgt_str=summaries, ) return batch def decode_summary(summary_tokens, tokenizer): """ Decode the summary and return it in a format suitable for evaluation. """ summary_tokens = summary_tokens.to("cpu").numpy() summary = tokenizer.decode(summary_tokens) sentences = summary.split(".") sentences = [s + "." for s in sentences] return sentences def main(): """ The main function defines the interface with the users. """ parser = argparse.ArgumentParser() parser.add_argument( "--documents_dir", default=None, type=str, required=True, help="The folder where the documents to summarize are located.", ) parser.add_argument( "--summaries_output_dir", default=None, type=str, required=False, help="The folder in wich the summaries should be written. Defaults to the folder where the documents are", ) parser.add_argument( "--compute_rouge", default=False, type=bool, required=False, help="Compute the ROUGE metrics during evaluation. 
Only available for the CNN/DailyMail dataset.", ) # EVALUATION options parser.add_argument( "--no_cuda", default=False, type=bool, help="Whether to force the execution on CPU.", ) parser.add_argument( "--batch_size", default=4, type=int, help="Batch size per GPU/CPU for training.", ) # BEAM SEARCH arguments parser.add_argument( "--min_length", default=50, type=int, help="Minimum number of tokens for the summaries.", ) parser.add_argument( "--max_length", default=200, type=int, help="Maixmum number of tokens for the summaries.", ) parser.add_argument( "--beam_size", default=5, type=int, help="The number of beams to start with for each example.", ) parser.add_argument( "--alpha", default=0.95, type=float, help="The value of alpha for the length penalty in the beam search.", ) parser.add_argument( "--block_trigram", default=True, type=bool, help="Whether to block the existence of repeating trigrams in the text generated by beam search.", ) args = parser.parse_args() # Select device (distibuted not available) args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") # Check the existence of directories if not args.summaries_output_dir: args.summaries_output_dir = args.documents_dir if not documents_dir_is_valid(args.documents_dir): raise FileNotFoundError( "We could not find the directory you specified for the documents to summarize, or it was empty. Please specify a valid path." ) os.makedirs(args.summaries_output_dir, exist_ok=True) evaluate(args) def documents_dir_is_valid(path): if not os.path.exists(path): return False file_list = os.listdir(path) if len(file_list) == 0: return False return True if __name__ == "__main__": main()
10,015
29.818462
137
py
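The evaluation script above scores generated summaries with the third-party py-rouge package (imported as `rouge`) and reads F1/precision/recall for ROUGE-1/2/L out of the averaged result dict. A minimal sketch of that scoring step in isolation, assuming the same package the script imports and using placeholder summary strings:

import rouge

evaluator = rouge.Rouge(
    metrics=["rouge-n", "rouge-l"],
    max_n=2,
    limit_length=True,
    length_limit=100,               # the script passes args.beam_size here
    length_limit_type="words",
    apply_avg=True,                 # average over the corpus -> a single dict of scores
    apply_best=False,
    alpha=0.5,                      # F1 weight between precision and recall
    weight_factor=1.2,
    stemming=True,
)

generated = ["the cat sat on the mat ."]          # placeholder system summary
references = ["a cat was sitting on the mat ."]   # placeholder reference summary

scores = evaluator.get_scores(generated, references)
print(scores["rouge-1"]["f"], scores["rouge-2"]["f"], scores["rouge-l"]["f"])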
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/contrib/run_transfo_xl.py
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Transformer XL model evaluation script. Adapted from https://github.com/kimiyoung/transformer-xl. In particular https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/eval.py This script with default values evaluates a pretrained Transformer-XL on WikiText 103 """ import argparse import logging import math import time import torch from transformers import TransfoXLCorpus, TransfoXLLMHeadModel logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) logger = logging.getLogger(__name__) def main(): parser = argparse.ArgumentParser(description="PyTorch Transformer Language Model") parser.add_argument("--model_name", type=str, default="transfo-xl-wt103", help="pretrained model name") parser.add_argument( "--split", type=str, default="test", choices=["all", "valid", "test"], help="which split to evaluate" ) parser.add_argument("--batch_size", type=int, default=10, help="batch size") parser.add_argument("--tgt_len", type=int, default=128, help="number of tokens to predict") parser.add_argument("--ext_len", type=int, default=0, help="length of the extended context") parser.add_argument("--mem_len", type=int, default=1600, help="length of the retained previous heads") parser.add_argument("--clamp_len", type=int, default=1000, help="max positional embedding index") parser.add_argument("--no_cuda", action="store_true", help="Do not use CUDA even though CUA is available") parser.add_argument("--work_dir", type=str, required=True, help="path to the work_dir") parser.add_argument("--no_log", action="store_true", help="do not log the eval result") parser.add_argument("--same_length", action="store_true", help="set same length attention with masking") parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.") parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.") args = parser.parse_args() assert args.ext_len >= 0, "extended context length must be non-negative" if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") logger.info("device: {}".format(device)) # Load a pre-processed dataset # You can also build the corpus yourself using TransfoXLCorpus methods # The pre-processing involve computing word frequencies to prepare the Adaptive input and SoftMax # and tokenizing the dataset # The pre-processed corpus is a convertion (using the conversion script ) corpus = 
TransfoXLCorpus.from_pretrained(args.model_name) va_iter = corpus.get_iterator("valid", args.batch_size, args.tgt_len, device=device, ext_len=args.ext_len) te_iter = corpus.get_iterator("test", args.batch_size, args.tgt_len, device=device, ext_len=args.ext_len) # Load a pre-trained model model = TransfoXLLMHeadModel.from_pretrained(args.model_name) model.to(device) logger.info( "Evaluating with bsz {} tgt_len {} ext_len {} mem_len {} clamp_len {}".format( args.batch_size, args.tgt_len, args.ext_len, args.mem_len, args.clamp_len ) ) model.reset_length(args.tgt_len, args.ext_len, args.mem_len) if args.clamp_len > 0: model.clamp_len = args.clamp_len if args.same_length: model.same_length = True ############################################################################### # Evaluation code ############################################################################### def evaluate(eval_iter): # Turn on evaluation mode which disables dropout. model.eval() total_len, total_loss = 0, 0.0 start_time = time.time() with torch.no_grad(): mems = None for idx, (data, target, seq_len) in enumerate(eval_iter): ret = model(data, lm_labels=target, mems=mems) loss, _, mems = ret loss = loss.mean() total_loss += seq_len * loss.item() total_len += seq_len total_time = time.time() - start_time logger.info("Time : {:.2f}s, {:.2f}ms/segment".format(total_time, 1000 * total_time / (idx + 1))) return total_loss / total_len # Run on test data. if args.split == "all": test_loss = evaluate(te_iter) valid_loss = evaluate(va_iter) elif args.split == "valid": valid_loss = evaluate(va_iter) test_loss = None elif args.split == "test": test_loss = evaluate(te_iter) valid_loss = None def format_log(loss, split): log_str = "| {0} loss {1:5.2f} | {0} ppl {2:9.3f} ".format(split, loss, math.exp(loss)) return log_str log_str = "" if valid_loss is not None: log_str += format_log(valid_loss, "valid") if test_loss is not None: log_str += format_log(test_loss, "test") logger.info("=" * 100) logger.info(log_str) logger.info("=" * 100) if __name__ == "__main__": main()
6,206
41.806897
116
py
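evaluate() above accumulates the length-weighted token loss over segments and format_log() reports perplexity as exp(loss). The aggregation boils down to the following, shown here with made-up per-segment values:

import math

segments = [(128, 3.41), (128, 3.52), (64, 3.38)]   # (seq_len, mean token loss) per evaluated segment

total_len = sum(seq_len for seq_len, _ in segments)
total_loss = sum(seq_len * loss for seq_len, loss in segments)

avg_loss = total_loss / total_len    # length-weighted mean negative log-likelihood per token
ppl = math.exp(avg_loss)             # perplexity, exactly as format_log() computes it
print("| test loss {:5.2f} | test ppl {:9.3f}".format(avg_loss, ppl))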
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/contrib/run_swag.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT finetuning runner. Finetuning the library models for multiple choice on SWAG (Bert). """ import argparse import csv import glob import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from transformers import WEIGHTS_NAME, AdamW, AutoConfig, AutoTokenizer, get_linear_schedule_with_warmup from transformers.modeling_auto import AutoModelForMultipleChoice try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter logger = logging.getLogger(__name__) class SwagExample(object): """A single training/test example for the SWAG dataset.""" def __init__(self, swag_id, context_sentence, start_ending, ending_0, ending_1, ending_2, ending_3, label=None): self.swag_id = swag_id self.context_sentence = context_sentence self.start_ending = start_ending self.endings = [ ending_0, ending_1, ending_2, ending_3, ] self.label = label def __str__(self): return self.__repr__() def __repr__(self): attributes = [ "swag_id: {}".format(self.swag_id), "context_sentence: {}".format(self.context_sentence), "start_ending: {}".format(self.start_ending), "ending_0: {}".format(self.endings[0]), "ending_1: {}".format(self.endings[1]), "ending_2: {}".format(self.endings[2]), "ending_3: {}".format(self.endings[3]), ] if self.label is not None: attributes.append("label: {}".format(self.label)) return ", ".join(attributes) class InputFeatures(object): def __init__(self, example_id, choices_features, label): self.example_id = example_id self.choices_features = [ {"input_ids": input_ids, "input_mask": input_mask, "segment_ids": segment_ids} for _, input_ids, input_mask, segment_ids in choices_features ] self.label = label def read_swag_examples(input_file, is_training=True): with open(input_file, "r", encoding="utf-8") as f: lines = list(csv.reader(f)) if is_training and lines[0][-1] != "label": raise ValueError("For training, the input file must contain a label column.") examples = [ SwagExample( swag_id=line[2], context_sentence=line[4], start_ending=line[5], # in the swag dataset, the # common beginning of each # choice is stored in "sent2". ending_0=line[7], ending_1=line[8], ending_2=line[9], ending_3=line[10], label=int(line[11]) if is_training else None, ) for line in lines[1:] # we skip the line with the column names ] return examples def convert_examples_to_features(examples, tokenizer, max_seq_length, is_training): """Loads a data file into a list of `InputBatch`s.""" # Swag is a multiple choice task. 
To perform this task using Bert, # we will use the formatting proposed in "Improving Language # Understanding by Generative Pre-Training" and suggested by # @jacobdevlin-google in this issue # https://github.com/google-research/bert/issues/38. # # Each choice will correspond to a sample on which we run the # inference. For a given Swag example, we will create the 4 # following inputs: # - [CLS] context [SEP] choice_1 [SEP] # - [CLS] context [SEP] choice_2 [SEP] # - [CLS] context [SEP] choice_3 [SEP] # - [CLS] context [SEP] choice_4 [SEP] # The model will output a single value for each input. To get the # final decision of the model, we will run a softmax over these 4 # outputs. features = [] for example_index, example in tqdm(enumerate(examples)): context_tokens = tokenizer.tokenize(example.context_sentence) start_ending_tokens = tokenizer.tokenize(example.start_ending) choices_features = [] for ending_index, ending in enumerate(example.endings): # We create a copy of the context tokens in order to be # able to shrink it according to ending_tokens context_tokens_choice = context_tokens[:] ending_tokens = start_ending_tokens + tokenizer.tokenize(ending) # Modifies `context_tokens_choice` and `ending_tokens` in # place so that the total length is less than the # specified length. Account for [CLS], [SEP], [SEP] with # "- 3" _truncate_seq_pair(context_tokens_choice, ending_tokens, max_seq_length - 3) tokens = ["[CLS]"] + context_tokens_choice + ["[SEP]"] + ending_tokens + ["[SEP]"] segment_ids = [0] * (len(context_tokens_choice) + 2) + [1] * (len(ending_tokens) + 1) input_ids = tokenizer.convert_tokens_to_ids(tokens) input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. padding = [0] * (max_seq_length - len(input_ids)) input_ids += padding input_mask += padding segment_ids += padding assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length choices_features.append((tokens, input_ids, input_mask, segment_ids)) label = example.label if example_index < 5: logger.info("*** Example ***") logger.info("swag_id: {}".format(example.swag_id)) for choice_idx, (tokens, input_ids, input_mask, segment_ids) in enumerate(choices_features): logger.info("choice: {}".format(choice_idx)) logger.info("tokens: {}".format(" ".join(tokens))) logger.info("input_ids: {}".format(" ".join(map(str, input_ids)))) logger.info("input_mask: {}".format(" ".join(map(str, input_mask)))) logger.info("segment_ids: {}".format(" ".join(map(str, segment_ids)))) if is_training: logger.info("label: {}".format(label)) features.append(InputFeatures(example_id=example.swag_id, choices_features=choices_features, label=label)) return features def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. 
while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def accuracy(out, labels): outputs = np.argmax(out, axis=1) return np.sum(outputs == labels) def select_field(features, field): return [[choice[field] for choice in feature.choices_features] for feature in features] def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Load data features from cache or dataset file input_file = args.predict_file if evaluate else args.train_file cached_features_file = os.path.join( os.path.dirname(input_file), "cached_{}_{}_{}".format( "dev" if evaluate else "train", list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), ), ) if os.path.exists(cached_features_file) and not args.overwrite_cache and not output_examples: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", input_file) examples = read_swag_examples(input_file) features = convert_examples_to_features(examples, tokenizer, args.max_seq_length, not evaluate) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor(select_field(features, "input_ids"), dtype=torch.long) all_input_mask = torch.tensor(select_field(features, "input_mask"), dtype=torch.long) all_segment_ids = torch.tensor(select_field(features, "segment_ids"), dtype=torch.long) all_label = torch.tensor([f.label for f in features], dtype=torch.long) if evaluate: dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label) else: dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label) if output_examples: return dataset, examples, features return dataset def train(args, train_dataset, model, tokenizer): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, ] optimizer = 
AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True ) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = { "input_ids": batch[0], "attention_mask": batch[1], # 'token_type_ids': None if args.model_type == 'xlm' else batch[2], "token_type_ids": batch[2], "labels": batch[3], } # if args.model_type in ['xlnet', 'xlm']: # inputs.update({'cls_index': batch[5], # 'p_mask': batch[6]}) outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if ( args.local_rank == -1 and args.evaluate_during_training ): # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar("eval_{}".format(key), value, global_step) tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step) tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # 
Save model checkpoint output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) tokenizer.save_vocabulary(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = { "input_ids": batch[0], "attention_mask": batch[1], # 'token_type_ids': None if args.model_type == 'xlm' else batch[2] # XLM don't use segment_ids "token_type_ids": batch[2], "labels": batch[3], } # if args.model_type in ['xlnet', 'xlm']: # inputs.update({'cls_index': batch[4], # 'p_mask': batch[5]}) outputs = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() logits = logits.detach().cpu().numpy() label_ids = inputs["labels"].to("cpu").numpy() tmp_eval_accuracy = accuracy(logits, label_ids) eval_accuracy += tmp_eval_accuracy nb_eval_steps += 1 nb_eval_examples += inputs["input_ids"].size(0) eval_loss = eval_loss / nb_eval_steps eval_accuracy = eval_accuracy / nb_eval_examples result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy} output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info("%s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return result def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--train_file", default=None, type=str, required=True, help="SWAG csv for training. E.g., train.csv" ) parser.add_argument( "--predict_file", default=None, type=str, required=True, help="SWAG csv for predictions. 
E.g., val.csv or test.csv", ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.", ) # Other parameters parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--max_seq_length", default=384, type=int, help="The maximum total input sequence length after tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.", ) parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument( "--evaluate_during_training", action="store_true", help="Rul evaluation during training at each logging step." ) parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument( "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation." ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform." ) parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.", ) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.") parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument( "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available") parser.add_argument( "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html", ) parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.") parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.") args = parser.parse_args() if ( os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir ): raise ValueError( "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( args.output_dir ) ) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab config = AutoConfig.from_pretrained(args.config_name if args.config_name else args.model_name_or_path) tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,) model = AutoModelForMultipleChoice.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config ) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Save the trained model and the tokenizer if args.local_rank == -1 or torch.distributed.get_rank() == 0: logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Load a trained model and vocabulary that you have fine-tuned model = AutoModelForMultipleChoice.from_pretrained(args.output_dir) tokenizer = AutoTokenizer.from_pretrained(args.output_dir) model.to(args.device) # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory results = {} if args.do_eval and args.local_rank in [-1, 0]: if args.do_train: checkpoints = [args.output_dir] else: # if do_train is False and do_eval is true, load model directly from pretrained. 
checkpoints = [args.model_name_or_path] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) ) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: # Reload the model global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" model = AutoModelForMultipleChoice.from_pretrained(checkpoint) tokenizer = AutoTokenizer.from_pretrained(checkpoint) model.to(args.device) # Evaluate result = evaluate(args, model, tokenizer, prefix=global_step) result = dict((k + ("_{}".format(global_step) if global_step else ""), v) for k, v in result.items()) results.update(result) logger.info("Results: {}".format(results)) return results if __name__ == "__main__": main()
29,878
41.32153
150
py
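convert_examples_to_features() above turns every SWAG example into four "[CLS] context [SEP] ending [SEP]" sequences and relies on _truncate_seq_pair() to pop tokens from whichever side is currently longer until the pair fits. A self-contained sketch of that construction with toy whitespace "tokens" (no tokenizer needed):

def truncate_pair(tokens_a, tokens_b, max_length):
    # Same heuristic as _truncate_seq_pair: always shorten the longer sequence first.
    while len(tokens_a) + len(tokens_b) > max_length:
        (tokens_a if len(tokens_a) > len(tokens_b) else tokens_b).pop()

context = "a man is standing on a ladder outside the house".split()
endings = ["he paints the wall", "he flies away"]     # toy alternatives; SWAG has four per example
max_seq_length = 16

for ending in endings:
    ctx = context[:]                                  # copy so each choice truncates independently
    end = ending.split()
    truncate_pair(ctx, end, max_seq_length - 3)       # reserve room for [CLS], [SEP], [SEP]
    tokens = ["[CLS]"] + ctx + ["[SEP]"] + end + ["[SEP]"]
    segment_ids = [0] * (len(ctx) + 2) + [1] * (len(end) + 1)
    print(len(tokens), tokens)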
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/contrib/run_openai_gpt.py
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ OpenAI GPT model fine-tuning script. Adapted from https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/train.py It self adapted from https://github.com/openai/finetune-transformer-lm/blob/master/train.py This script with default values fine-tunes and evaluate a pretrained OpenAI GPT on the RocStories dataset: python run_openai_gpt.py \ --model_name openai-gpt \ --do_train \ --do_eval \ --train_dataset "$ROC_STORIES_DIR/cloze_test_val__spring2016 - cloze_test_ALL_val.csv" \ --eval_dataset "$ROC_STORIES_DIR/cloze_test_test__spring2016 - cloze_test_ALL_test.csv" \ --output_dir ../log \ --train_batch_size 16 \ """ import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) logger = logging.getLogger(__name__) def accuracy(out, labels): outputs = np.argmax(out, axis=1) return np.sum(outputs == labels) def load_rocstories_dataset(dataset_path): """ Output a list of tuples(story, 1st continuation, 2nd continuation, label) """ with open(dataset_path, encoding="utf_8") as f: f = csv.reader(f) output = [] next(f) # skip the first line for line in tqdm(f): output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1)) return output def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token): """ Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label) To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation: input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token] """ tensor_datasets = [] for dataset in encoded_datasets: n_batch = len(dataset) input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64) mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64) lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64) mc_labels = np.zeros((n_batch,), dtype=np.int64) for i, (story, cont1, cont2, mc_label), in enumerate(dataset): with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token] with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token] input_ids[i, 0, : len(with_cont1)] = with_cont1 input_ids[i, 1, : len(with_cont2)] = with_cont2 mc_token_ids[i, 0] = len(with_cont1) - 1 mc_token_ids[i, 1] = len(with_cont2) - 1 
lm_labels[i, 0, : len(with_cont1)] = with_cont1 lm_labels[i, 1, : len(with_cont2)] = with_cont2 mc_labels[i] = mc_label all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs)) return tensor_datasets def main(): parser = argparse.ArgumentParser() parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name") parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--train_dataset", type=str, default="") parser.add_argument("--eval_dataset", type=str, default="") parser.add_argument("--seed", type=int, default=42) parser.add_argument("--num_train_epochs", type=int, default=3) parser.add_argument("--train_batch_size", type=int, default=8) parser.add_argument("--eval_batch_size", type=int, default=16) parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", type=int, default=1) parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training \ steps to perform. Override num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before\ performing a backward/update pass.", ) parser.add_argument("--learning_rate", type=float, default=6.25e-5) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--lr_schedule", type=str, default="warmup_linear") parser.add_argument("--weight_decay", type=float, default=0.01) parser.add_argument("--lm_coef", type=float, default=0.9) parser.add_argument("--n_valid", type=int, default=374) parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.") parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.") args = parser.parse_args() print(args) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") n_gpu = torch.cuda.device_count() logger.info("device: {}, n_gpu {}".format(device, n_gpu)) if not args.do_train and not args.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True.") if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset special_tokens = ["_start_", "_delimiter_", "_classify_"] tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name) tokenizer.add_tokens(special_tokens) special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens) model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name) 
model.resize_token_embeddings(len(tokenizer)) model.to(device) # Load and encode the datasets def tokenize_and_encode(obj): """ Tokenize and encode a nested object """ if isinstance(obj, str): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj)) elif isinstance(obj, int): return obj return list(tokenize_and_encode(o) for o in obj) logger.info("Encoding dataset...") train_dataset = load_rocstories_dataset(args.train_dataset) eval_dataset = load_rocstories_dataset(args.eval_dataset) datasets = (train_dataset, eval_dataset) encoded_datasets = tokenize_and_encode(datasets) # Compute the max input length for the Transformer max_length = model.config.n_positions // 2 - 2 input_length = max( len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3 for dataset in encoded_datasets for story, cont1, cont2, _ in dataset ) input_length = min(input_length, model.config.n_positions) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids) train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1] train_data = TensorDataset(*train_tensor_dataset) train_sampler = RandomSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size) eval_data = TensorDataset(*eval_tensor_dataset) eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) # Prepare optimizer if args.do_train: if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs param_optimizer = list(model.named_parameters()) no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) if args.do_train: nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs), desc="Epoch"): tr_loss = 0 nb_tr_steps = 0 tqdm_bar = tqdm(train_dataloader, desc="Training") for step, batch in enumerate(tqdm_bar): batch = tuple(t.to(device) for t in batch) input_ids, mc_token_ids, lm_labels, mc_labels = batch losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels) loss = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() exp_average_loss = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0]) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer model_to_save = model.module if hasattr(model, "module") else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` output_model_file = 
os.path.join(args.output_dir, WEIGHTS_NAME) output_config_file = os.path.join(args.output_dir, CONFIG_NAME) torch.save(model_to_save.state_dict(), output_model_file) model_to_save.config.to_json_file(output_config_file) tokenizer.save_vocabulary(args.output_dir) # Load a trained model and vocabulary that you have fine-tuned model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir) tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir) model.to(device) if args.do_eval: model.eval() eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 for batch in tqdm(eval_dataloader, desc="Evaluating"): batch = tuple(t.to(device) for t in batch) input_ids, mc_token_ids, lm_labels, mc_labels = batch with torch.no_grad(): _, mc_loss, _, mc_logits = model( input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels ) mc_logits = mc_logits.detach().cpu().numpy() mc_labels = mc_labels.to("cpu").numpy() tmp_eval_accuracy = accuracy(mc_logits, mc_labels) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0) nb_eval_steps += 1 eval_loss = eval_loss / nb_eval_steps eval_accuracy = eval_accuracy / nb_eval_examples train_loss = tr_loss / nb_tr_steps if args.do_train else None result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss} output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if __name__ == "__main__": main()
14,226
43.880126
132
py
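pre_process_datasets() above packs each RocStories example into a (2, input_len) block: one row per candidate ending, LM labels copied from the inputs over a -100 padding, and mc_token_ids pointing at the final _classify_ token read by the multiple-choice head. A tiny numpy sketch with made-up token ids (the real ids come from the extended tokenizer):

import numpy as np

start, delim, clf = 40478, 40479, 40480   # placeholder ids for _start_, _delimiter_, _classify_
story = [11, 12, 13, 14]
cont1, cont2 = [21, 22], [31, 32, 33]
input_len = 12

input_ids = np.zeros((2, input_len), dtype=np.int64)
mc_token_ids = np.zeros((2,), dtype=np.int64)
lm_labels = np.full((2, input_len), fill_value=-100, dtype=np.int64)  # -100 positions are ignored by the LM loss

for j, cont in enumerate((cont1, cont2)):
    seq = [start] + story + [delim] + cont + [clf]
    input_ids[j, : len(seq)] = seq
    lm_labels[j, : len(seq)] = seq
    mc_token_ids[j] = len(seq) - 1        # index of the _classify_ token used by the MC head

print(input_ids)
print(mc_token_ids)                        # -> [8 9] for this toy example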
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/contrib/run_camembert.py
import torch

from transformers.modeling_camembert import CamembertForMaskedLM
from transformers.tokenization_camembert import CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (masked_input.replace(masked_token, predicted_token), values[index].item(), predicted_token,)
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
1,901
42.227273
114
py
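fill_mask() above returns the top-k completions as (filled_text, probability, token) tuples, so a caller can rank or display them directly. A short usage sketch, assuming the model and tokenizer objects created at the bottom of the script:

for filled_text, probability, token in fill_mask("Le camembert est <mask> :)", model, tokenizer, topk=3):
    print("{:.3f}  {:>12}  {}".format(probability, token.strip(), filled_text))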
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/contrib/mm-imdb/run_mmimdb.py
# coding=utf-8 # Copyright (c) Facebook, Inc. and its affiliates. # Copyright (c) HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for multimodal multiclass prediction on MM-IMDB dataset.""" import argparse import glob import json import logging import os import random import numpy as np import torch import torch.nn as nn from sklearn.metrics import f1_score from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from transformers import ( WEIGHTS_NAME, AdamW, AutoConfig, AutoModel, AutoTokenizer, MMBTConfig, MMBTForClassification, get_linear_schedule_with_warmup, ) from utils_mmimdb import ImageEncoder, JsonlDataset, collate_fn, get_image_transforms, get_mmimdb_labels try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter logger = logging.getLogger(__name__) def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def train(args, train_dataset, model, tokenizer, criterion): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader( train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=collate_fn, num_workers=args.num_workers, ) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, 
find_unused_parameters=True ) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 best_f1, n_no_improve = 0, 0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) labels = batch[5] inputs = { "input_ids": batch[0], "input_modal": batch[2], "attention_mask": batch[1], "modal_start_tokens": batch[3], "modal_end_tokens": batch[4], } outputs = model(**inputs) logits = outputs[0] # model outputs are always tuple in transformers (see doc) loss = criterion(logits, labels) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: logs = {} if ( args.local_rank == -1 and args.evaluate_during_training ): # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer, criterion) for key, value in results.items(): eval_key = "eval_{}".format(key) logs[eval_key] = value loss_scalar = (tr_loss - logging_loss) / args.logging_steps learning_rate_scalar = scheduler.get_lr()[0] logs["learning_rate"] = learning_rate_scalar logs["loss"] = loss_scalar logging_loss = tr_loss for key, value in logs.items(): tb_writer.add_scalar(key, value, global_step) print(json.dumps({**logs, **{"step": global_step}})) if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training torch.save(model_to_save.state_dict(), os.path.join(output_dir, WEIGHTS_NAME)) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank == -1: 
results = evaluate(args, model, tokenizer, criterion) if results["micro_f1"] > best_f1: best_f1 = results["micro_f1"] n_no_improve = 0 else: n_no_improve += 1 if n_no_improve > args.patience: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, criterion, prefix=""): # Loop to handle MNLI double evaluation (matched, mis-matched) eval_output_dir = args.output_dir eval_dataset = load_examples(args, tokenizer, evaluate=True) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader( eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, collate_fn=collate_fn ) # multi-gpu eval if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds = None out_label_ids = None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): batch = tuple(t.to(args.device) for t in batch) labels = batch[5] inputs = { "input_ids": batch[0], "input_modal": batch[2], "attention_mask": batch[1], "modal_start_tokens": batch[3], "modal_end_tokens": batch[4], } outputs = model(**inputs) logits = outputs[0] # model outputs are always tuple in transformers (see doc) tmp_eval_loss = criterion(logits, labels) eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = torch.sigmoid(logits).detach().cpu().numpy() > 0.5 out_label_ids = labels.detach().cpu().numpy() else: preds = np.append(preds, torch.sigmoid(logits).detach().cpu().numpy() > 0.5, axis=0) out_label_ids = np.append(out_label_ids, labels.detach().cpu().numpy(), axis=0) eval_loss = eval_loss / nb_eval_steps result = { "loss": eval_loss, "macro_f1": f1_score(out_label_ids, preds, average="macro"), "micro_f1": f1_score(out_label_ids, preds, average="micro"), } output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return result def load_examples(args, tokenizer, evaluate=False): path = os.path.join(args.data_dir, "dev.jsonl" if evaluate else "train.jsonl") transforms = get_image_transforms() labels = get_mmimdb_labels() dataset = JsonlDataset(path, tokenizer, transforms, labels, args.max_seq_length - args.num_image_embeds - 2) return dataset def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--data_dir", default=None, type=str, required=True, help="The input data dir. 
Should contain the .jsonl files for MMIMDB.", ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", ) # Other parameters parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3", ) parser.add_argument( "--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument( "--num_image_embeds", default=1, type=int, help="Number of Image Embeddings from the Image Encoder" ) parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument( "--evaluate_during_training", action="store_true", help="Rul evaluation during training at each logging step." ) parser.add_argument( "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model." ) parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument( "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation." ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform." ) parser.add_argument("--patience", default=5, type=int, help="Patience for Early Stopping.") parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.", ) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.") parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument( "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available") parser.add_argument("--num_workers", type=int, default=8, help="number of worker threads for dataloading") parser.add_argument( "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html", ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.") parser.add_argument("--server_port", type=str, default="", help="For distant debugging.") args = parser.parse_args() if ( os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir ): raise ValueError( "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( args.output_dir ) ) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab # Setup model labels = get_mmimdb_labels() num_labels = len(labels) transformer_config = AutoConfig.from_pretrained(args.config_name if args.config_name else args.model_name_or_path) tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir, ) transformer = AutoModel.from_pretrained( args.model_name_or_path, config=transformer_config, cache_dir=args.cache_dir ) img_encoder = ImageEncoder(args) config = MMBTConfig(transformer_config, num_labels=num_labels) model = MMBTForClassification(config, transformer, img_encoder) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_examples(args, tokenizer, evaluate=False) label_frequences = train_dataset.get_label_frequencies() label_frequences = [label_frequences[l] for l in labels] label_weights = ( torch.tensor(label_frequences, device=args.device, dtype=torch.float) / len(train_dataset) ) ** -1 criterion = nn.BCEWithLogitsLoss(pos_weight=label_weights) global_step, tr_loss = train(args, train_dataset, model, tokenizer, criterion) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training torch.save(model_to_save.state_dict(), os.path.join(args.output_dir, WEIGHTS_NAME)) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Load a trained model and vocabulary that you have fine-tuned model = MMBTForClassification(config, transformer, img_encoder) model.load_state_dict(torch.load(os.path.join(args.output_dir, WEIGHTS_NAME))) tokenizer = AutoTokenizer.from_pretrained(args.output_dir) model.to(args.device) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) ) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else "" model = MMBTForClassification(config, transformer, img_encoder) model.load_state_dict(torch.load(checkpoint)) model.to(args.device) result = evaluate(args, model, tokenizer, criterion, prefix=prefix) result = dict((k + "_{}".format(global_step), v) for k, v in result.items()) results.update(result) return results if __name__ == "__main__": main()
23,652
40.716049
123
py
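The run_mmimdb training/evaluation code above treats genre prediction as multi-label classification: BCEWithLogitsLoss is weighted by inverse label frequency, and evaluation thresholds sigmoid(logits) at 0.5 before computing micro/macro F1. Below is a minimal standalone sketch of that loss/metric pattern on synthetic tensors (the shapes and values are made up for illustration, not taken from the script's data):

import torch
import torch.nn as nn
from sklearn.metrics import f1_score

torch.manual_seed(0)

# Synthetic multi-label batch: 4 samples, 5 labels (illustrative values only).
logits = torch.randn(4, 5)
labels = torch.randint(0, 2, (4, 5)).float()

# Inverse-frequency positive-class weights, mirroring label_weights in main().
freq = labels.sum(dim=0).clamp(min=1)
pos_weight = (freq / labels.size(0)) ** -1
criterion = nn.BCEWithLogitsLoss(pos_weight=pos_weight)
loss = criterion(logits, labels)

# Evaluation path: sigmoid, threshold at 0.5, then micro / macro F1.
preds = (torch.sigmoid(logits) > 0.5).numpy()
print(loss.item())
print(f1_score(labels.numpy(), preds, average="micro", zero_division=0))
print(f1_score(labels.numpy(), preds, average="macro", zero_division=0))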
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/contrib/mm-imdb/utils_mmimdb.py
# coding=utf-8 # Copyright (c) Facebook, Inc. and its affiliates. # Copyright (c) HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from collections import Counter import torch import torch.nn as nn import torchvision import torchvision.transforms as transforms from PIL import Image from torch.utils.data import Dataset POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class ImageEncoder(nn.Module): def __init__(self, args): super().__init__() model = torchvision.models.resnet152(pretrained=True) modules = list(model.children())[:-2] self.model = nn.Sequential(*modules) self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds]) def forward(self, x): # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048 out = self.pool(self.model(x)) out = torch.flatten(out, start_dim=2) out = out.transpose(1, 2).contiguous() return out # BxNx2048 class JsonlDataset(Dataset): def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length): self.data = [json.loads(l) for l in open(data_path)] self.data_dir = os.path.dirname(data_path) self.tokenizer = tokenizer self.labels = labels self.n_classes = len(labels) self.max_seq_length = max_seq_length self.transforms = transforms def __len__(self): return len(self.data) def __getitem__(self, index): sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True)) start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1] sentence = sentence[: self.max_seq_length] label = torch.zeros(self.n_classes) label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1 image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB") image = self.transforms(image) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def get_label_frequencies(self): label_freqs = Counter() for row in self.data: label_freqs.update(row["label"]) return label_freqs def collate_fn(batch): lens = [len(row["sentence"]) for row in batch] bsz, max_seq_len = len(batch), max(lens) mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long) text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long) for i_batch, (input_row, length) in enumerate(zip(batch, lens)): text_tensor[i_batch, :length] = input_row["sentence"] mask_tensor[i_batch, :length] = 1 img_tensor = torch.stack([row["image"] for row in batch]) tgt_tensor = torch.stack([row["label"] for row in batch]) img_start_token = torch.stack([row["image_start_token"] for row in batch]) img_end_token = torch.stack([row["image_end_token"] for row in batch]) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def get_mmimdb_labels(): return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", 
"Music", "Musical", "Animation", "Biography", "Film-Noir", ] def get_image_transforms(): return transforms.Compose( [ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.46777044, 0.44531429, 0.40661017], std=[0.12221994, 0.12145835, 0.14380469],), ] )
4,541
30.541667
119
py
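ImageEncoder in utils_mmimdb.py above turns a Bx3x224x224 image batch into BxNx2048 embeddings by adaptive-pooling the ResNet-152 feature map to one of the POOLING_BREAKDOWN grids and flattening it. A rough shape check of that pooling step, using a random Bx2048x7x7 feature map in place of the real backbone so it runs without downloading weights:

import torch
import torch.nn as nn

# Copied (subset) from utils_mmimdb.py above: output grid per number of image embeddings.
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2)}

num_image_embeds = 3                      # N image tokens handed to MMBT
pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[num_image_embeds])

# Stand-in for the ResNet-152 feature map: B x 2048 x 7 x 7.
features = torch.randn(2, 2048, 7, 7)

out = pool(features)                      # 2 x 2048 x 3 x 1
out = torch.flatten(out, start_dim=2)     # 2 x 2048 x 3
out = out.transpose(1, 2).contiguous()    # 2 x 3 x 2048
print(out.shape)                          # torch.Size([2, 3, 2048])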
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/distillation/grouped_batch_sampler.py
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Adapted from PyTorch Vision (https://github.com/pytorch/vision/blob/master/references/detection/group_by_aspect_ratio.py) """ import bisect import copy from collections import defaultdict import numpy as np from torch.utils.data.sampler import BatchSampler, Sampler from utils import logger def _quantize(x, bins): bins = copy.deepcopy(bins) bins = sorted(bins) quantized = list(map(lambda y: bisect.bisect_right(bins, y), x)) return quantized def create_lengths_groups(lengths, k=0): bins = np.arange(start=3, stop=k, step=4).tolist() if k > 0 else [10] groups = _quantize(lengths, bins) # count number of elements per group counts = np.unique(groups, return_counts=True)[1] fbins = [0] + bins + [np.inf] logger.info("Using {} as bins for aspect lengths quantization".format(fbins)) logger.info("Count of instances per bin: {}".format(counts)) return groups class GroupedBatchSampler(BatchSampler): """ Wraps another sampler to yield a mini-batch of indices. It enforces that the batch only contain elements from the same group. It also tries to provide mini-batches which follows an ordering which is as close as possible to the ordering from the original sampler. Arguments: sampler (Sampler): Base sampler. group_ids (list[int]): If the sampler produces indices in range [0, N), `group_ids` must be a list of `N` ints which contains the group id of each sample. The group ids must be a continuous set of integers starting from 0, i.e. they must be in the range [0, num_groups). batch_size (int): Size of mini-batch. 
""" def __init__(self, sampler, group_ids, batch_size): if not isinstance(sampler, Sampler): raise ValueError( "sampler should be an instance of " "torch.utils.data.Sampler, but got sampler={}".format(sampler) ) self.sampler = sampler self.group_ids = group_ids self.batch_size = batch_size def __iter__(self): buffer_per_group = defaultdict(list) samples_per_group = defaultdict(list) num_batches = 0 for idx in self.sampler: group_id = self.group_ids[idx] buffer_per_group[group_id].append(idx) samples_per_group[group_id].append(idx) if len(buffer_per_group[group_id]) == self.batch_size: yield buffer_per_group[group_id] # TODO num_batches += 1 del buffer_per_group[group_id] assert len(buffer_per_group[group_id]) < self.batch_size # now we have run out of elements that satisfy # the group criteria, let's return the remaining # elements so that the size of the sampler is # deterministic expected_num_batches = len(self) num_remaining = expected_num_batches - num_batches if num_remaining > 0: # for the remaining batches, group the batches by similar lengths batch_idx = [] for group_id, idxs in sorted(buffer_per_group.items(), key=lambda x: x[0]): batch_idx.extend(idxs) if len(batch_idx) >= self.batch_size: yield batch_idx[: self.batch_size] batch_idx = batch_idx[self.batch_size :] num_remaining -= 1 if len(batch_idx) > 0: yield batch_idx num_remaining -= 1 assert num_remaining == 0 def __len__(self): """ Return the number of mini-batches rather than the number of samples. """ return (len(self.sampler) + self.batch_size - 1) // self.batch_size
4,360
39.009174
125
py
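GroupedBatchSampler above only yields batches whose samples fall in the same length bucket, which keeps per-batch padding small; the buckets come from create_lengths_groups / _quantize, which bisect each length into bins spaced 4 tokens apart. A small standalone illustration of that bucketing logic (hypothetical lengths, without the repo's utils.logger dependency):

import bisect
import numpy as np

def quantize(lengths, bins):
    # Same idea as _quantize above: map each length to its bin index.
    bins = sorted(bins)
    return [bisect.bisect_right(bins, length) for length in lengths]

# Hypothetical token lengths for eight sequences.
lengths = [5, 12, 31, 64, 7, 128, 90, 33]
k = 128
bins = np.arange(start=3, stop=k, step=4).tolist()   # [3, 7, 11, ..., 127]

groups = quantize(lengths, bins)
print(groups)   # sequences sharing a group id can be batched with little padding
print(np.unique(groups, return_counts=True))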
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/distillation/utils.py
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utils to train DistilBERT adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) """ import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger = logging.getLogger(__name__) def git_log(folder_path: str): """ Log commit info. """ repo = git.Repo(search_parent_directories=True) repo_infos = { "repo_id": str(repo), "repo_sha": str(repo.head.object.hexsha), "repo_branch": str(repo.active_branch), } with open(os.path.join(folder_path, "git_log.json"), "w") as f: json.dump(repo_infos, f, indent=4) def init_gpu_params(params): """ Handle single and multi-GPU / multi-node. """ if params.n_gpu <= 0: params.local_rank = 0 params.master_port = -1 params.is_master = True params.multi_gpu = False return assert torch.cuda.is_available() logger.info("Initializing GPUs") if params.n_gpu > 1: assert params.local_rank != -1 params.world_size = int(os.environ["WORLD_SIZE"]) params.n_gpu_per_node = int(os.environ["N_GPU_NODE"]) params.global_rank = int(os.environ["RANK"]) # number of nodes / node ID params.n_nodes = params.world_size // params.n_gpu_per_node params.node_id = params.global_rank // params.n_gpu_per_node params.multi_gpu = True assert params.n_nodes == int(os.environ["N_NODES"]) assert params.node_id == int(os.environ["NODE_RANK"]) # local job (single GPU) else: assert params.local_rank == -1 params.n_nodes = 1 params.node_id = 0 params.local_rank = 0 params.global_rank = 0 params.world_size = 1 params.n_gpu_per_node = 1 params.multi_gpu = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode params.is_master = params.node_id == 0 and params.local_rank == 0 params.multi_node = params.n_nodes > 1 # summary PREFIX = f"--- Global rank: {params.global_rank} - " logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes) logger.info(PREFIX + "Node ID : %i" % params.node_id) logger.info(PREFIX + "Local rank : %i" % params.local_rank) logger.info(PREFIX + "World size : %i" % params.world_size) logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node) logger.info(PREFIX + "Master : %s" % str(params.is_master)) logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node)) logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu)) logger.info(PREFIX + "Hostname : %s" % socket.gethostname()) # set GPU device torch.cuda.set_device(params.local_rank) # initialize multi-GPU if params.multi_gpu: logger.info("Initializing PyTorch distributed") torch.distributed.init_process_group( 
init_method="env://", backend="nccl", ) def set_seed(args): """ Set the random seed. """ np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed)
4,268
31.097744
90
py
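init_gpu_params in utils.py above reconstructs the node layout from environment variables: n_nodes = world_size // n_gpu_per_node and node_id = global_rank // n_gpu_per_node, with the master being node 0 / local rank 0. A quick worked check of that arithmetic with plain integers (a hypothetical 2-node x 4-GPU launch, no actual distributed setup):

# Hypothetical launch: 2 nodes x 4 GPUs each, i.e. WORLD_SIZE = 8 processes.
world_size = 8          # os.environ["WORLD_SIZE"]
n_gpu_per_node = 4      # os.environ["N_GPU_NODE"]

for global_rank in range(world_size):           # os.environ["RANK"] of each process
    n_nodes = world_size // n_gpu_per_node
    node_id = global_rank // n_gpu_per_node
    local_rank = global_rank % n_gpu_per_node   # how launchers typically assign it
    is_master = node_id == 0 and local_rank == 0
    print(global_rank, n_nodes, node_id, local_rank, is_master)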
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/distillation/lm_seqs_dataset.py
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Dataset to distilled models adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) """ import numpy as np import torch from torch.utils.data import Dataset from utils import logger class LmSeqsDataset(Dataset): """Custom Dataset wrapping language modeling sequences. Each sample will be retrieved by indexing the list of token_ids and their corresponding lengths. Input: ------ params: `NameSpace` parameters data: `List[np.array[int]] """ def __init__(self, params, data): self.params = params self.token_ids = np.array(data) self.lengths = np.array([len(t) for t in data]) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__(self, index): return (self.token_ids[index], self.lengths[index]) def __len__(self): return len(self.lengths) def check(self): """ Some sanity checks """ assert len(self.token_ids) == len(self.lengths) assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths))) def remove_long_sequences(self): """ Sequences that are too long are splitted by chunk of max_model_input_size. """ max_len = self.params.max_model_input_size indices = self.lengths > max_len logger.info(f"Splitting {sum(indices)} too long sequences.") def divide_chunks(l, n): return [l[i : i + n] for i in range(0, len(l), n)] new_tok_ids = [] new_lengths = [] if self.params.mlm: cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"] else: cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"] for seq_, len_ in zip(self.token_ids, self.lengths): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_) new_lengths.append(len_) else: sub_seqs = [] for sub_s in divide_chunks(seq_, max_len - 2): if sub_s[0] != cls_id: sub_s = np.insert(sub_s, 0, cls_id) if sub_s[-1] != sep_id: sub_s = np.insert(sub_s, len(sub_s), sep_id) assert len(sub_s) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(sub_s) new_tok_ids.extend(sub_seqs) new_lengths.extend([len(l) for l in sub_seqs]) self.token_ids = np.array(new_tok_ids) self.lengths = np.array(new_lengths) def remove_empty_sequences(self): """ Too short sequences are simply removed. This could be tunedd. """ init_size = len(self) indices = self.lengths > 11 self.token_ids = self.token_ids[indices] self.lengths = self.lengths[indices] new_size = len(self) logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.") def remove_unknown_sequences(self): """ Remove sequences with a (too) high level of unknown tokens. 
""" if "unk_token" not in self.params.special_tok_ids: return else: unk_token_id = self.params.special_tok_ids["unk_token"] init_size = len(self) unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids]) indices = (unk_occs / self.lengths) < 0.5 self.token_ids = self.token_ids[indices] self.lengths = self.lengths[indices] new_size = len(self) logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).") def print_statistics(self): """ Print some statistics on the corpus. Only the master process. """ if not self.params.is_master: return logger.info(f"{len(self)} sequences") # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unkown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unkown} unknown tokens (covering {100*nb_unkown/data_len:.2f}% of the data)') def batch_sequences(self, batch): """ Do the padding and transform into torch.tensor. """ token_ids = [t[0] for t in batch] lengths = [t[1] for t in batch] assert len(token_ids) == len(lengths) # Max for paddings max_seq_len_ = max(lengths) # Pad token ids if self.params.mlm: pad_idx = self.params.special_tok_ids["pad_token"] else: pad_idx = self.params.special_tok_ids["unk_token"] tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids] assert len(tk_) == len(token_ids) assert all(len(t) == max_seq_len_ for t in tk_) tk_t = torch.tensor(tk_) # (bs, max_seq_len_) lg_t = torch.tensor(lengths) # (bs) return tk_t, lg_t
6,133
35.730539
111
py
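remove_long_sequences in LmSeqsDataset above splits any sequence longer than max_model_input_size into chunks of max_len - 2 tokens and re-attaches the cls/sep (or bos/eos) ids to every chunk. A toy reproduction of that chunking with dummy token ids (101/102 stand in for cls/sep here and are not taken from the script):

import numpy as np

cls_id, sep_id = 101, 102     # dummy special-token ids
max_len = 8                   # stands in for params.max_model_input_size

def divide_chunks(l, n):
    return [l[i:i + n] for i in range(0, len(l), n)]

# One over-long sequence: cls ... sep, 11 tokens > max_len.
seq = np.array([cls_id, 5, 6, 7, 8, 9, 10, 11, 12, 13, sep_id])

sub_seqs = []
for sub_s in divide_chunks(seq, max_len - 2):
    if sub_s[0] != cls_id:
        sub_s = np.insert(sub_s, 0, cls_id)           # re-attach cls at the front
    if sub_s[-1] != sep_id:
        sub_s = np.insert(sub_s, len(sub_s), sep_id)  # re-attach sep at the end
    assert len(sub_s) <= max_len
    sub_seqs.append(sub_s)

print([s.tolist() for s in sub_seqs])
# [[101, 5, 6, 7, 8, 9, 102], [101, 10, 11, 12, 13, 102]]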
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/distillation/run_squad_w_distillation.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This is the exact same script as `examples/question-answering/run_squad.py` (as of 2020, January 8th) with an additional and optional step of distillation.""" import argparse import glob import logging import os import random import timeit import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from transformers import ( WEIGHTS_NAME, AdamW, BertConfig, BertForQuestionAnswering, BertTokenizer, DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer, RobertaConfig, RobertaForQuestionAnswering, RobertaTokenizer, XLMConfig, XLMForQuestionAnswering, XLMTokenizer, XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer, get_linear_schedule_with_warmup, squad_convert_examples_to_features, ) from transformers.data.metrics.squad_metrics import ( compute_predictions_log_probs, compute_predictions_logits, squad_evaluate, ) from transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter logger = logging.getLogger(__name__) MODEL_CLASSES = { "bert": (BertConfig, BertForQuestionAnswering, BertTokenizer), "xlnet": (XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer), "xlm": (XLMConfig, XLMForQuestionAnswering, XLMTokenizer), "distilbert": (DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer), "roberta": (RobertaConfig, RobertaForQuestionAnswering, RobertaTokenizer), } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def to_list(tensor): return tensor.detach().cpu().tolist() def train(args, train_dataset, model, tokenizer, teacher=None): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, {"params": [p for n, p in 
model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # Check if saved optimizer or scheduler states exist if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile( os.path.join(args.model_name_or_path, "scheduler.pt") ): # Load in optimizer and scheduler states optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True ) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 1 epochs_trained = 0 steps_trained_in_current_epoch = 0 # Check if continuing training from a checkpoint if os.path.exists(args.model_name_or_path): try: # set global_step to gobal_step of last saved checkpoint from model path checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0] global_step = int(checkpoint_suffix) epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps) logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", global_step) logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) except ValueError: logger.info(" Starting fine-tuning.") tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0] ) # Added here for reproductibility set_seed(args) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue model.train() if teacher is not None: teacher.eval() batch = tuple(t.to(args.device) for t in batch) inputs = { "input_ids": batch[0], "attention_mask": 
batch[1], "start_positions": batch[3], "end_positions": batch[4], } if args.model_type != "distilbert": inputs["token_type_ids"] = None if args.model_type == "xlm" else batch[2] if args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": batch[5], "p_mask": batch[6]}) if args.version_2_with_negative: inputs.update({"is_impossible": batch[7]}) outputs = model(**inputs) loss, start_logits_stu, end_logits_stu = outputs # Distillation loss if teacher is not None: if "token_type_ids" not in inputs: inputs["token_type_ids"] = None if args.teacher_type == "xlm" else batch[2] with torch.no_grad(): start_logits_tea, end_logits_tea = teacher( input_ids=inputs["input_ids"], token_type_ids=inputs["token_type_ids"], attention_mask=inputs["attention_mask"], ) assert start_logits_tea.size() == start_logits_stu.size() assert end_logits_tea.size() == end_logits_stu.size() loss_fct = nn.KLDivLoss(reduction="batchmean") loss_start = loss_fct( F.log_softmax(start_logits_stu / args.temperature, dim=-1), F.softmax(start_logits_tea / args.temperature, dim=-1), ) * (args.temperature ** 2) loss_end = loss_fct( F.log_softmax(end_logits_stu / args.temperature, dim=-1), F.softmax(end_logits_tea / args.temperature, dim=-1), ) * (args.temperature ** 2) loss_ce = (loss_start + loss_end) / 2.0 loss = args.alpha_ce * loss_ce + args.alpha_squad * loss if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 # Log metrics if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Only evaluate when single GPU otherwise metrics may not average well if args.local_rank == -1 and args.evaluate_during_training: results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar("eval_{}".format(key), value, global_step) tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step) tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if 
args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu evaluate if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) all_results = [] start_time = timeit.default_timer() for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {"input_ids": batch[0], "attention_mask": batch[1]} if args.model_type != "distilbert": inputs["token_type_ids"] = None if args.model_type == "xlm" else batch[2] # XLM don't use segment_ids example_indices = batch[3] if args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": batch[4], "p_mask": batch[5]}) outputs = model(**inputs) for i, example_index in enumerate(example_indices): eval_feature = features[example_index.item()] unique_id = int(eval_feature.unique_id) output = [to_list(output[i]) for output in outputs] # Some models (XLNet, XLM) use 5 arguments for their predictions, while the other "simpler" # models only use two. if len(output) >= 5: start_logits = output[0] start_top_index = output[1] end_logits = output[2] end_top_index = output[3] cls_logits = output[4] result = SquadResult( unique_id, start_logits, end_logits, start_top_index=start_top_index, end_top_index=end_top_index, cls_logits=cls_logits, ) else: start_logits, end_logits = output result = SquadResult(unique_id, start_logits, end_logits) all_results.append(result) evalTime = timeit.default_timer() - start_time logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset)) # Compute predictions output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix)) output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix)) if args.version_2_with_negative: output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix)) else: output_null_log_odds_file = None if args.model_type in ["xlnet", "xlm"]: # XLNet uses a more complex post-processing procedure predictions = compute_predictions_log_probs( examples, features, all_results, args.n_best_size, args.max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, model.config.start_n_top, model.config.end_n_top, args.version_2_with_negative, tokenizer, args.verbose_logging, ) else: predictions = compute_predictions_logits( examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.verbose_logging, args.version_2_with_negative, args.null_score_diff_threshold, tokenizer, ) # Compute the F1 and exact scores. 
results = squad_evaluate(examples, predictions) return results def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): if args.local_rank not in [-1, 0] and not evaluate: # Make sure only the first process in distributed training process the dataset, and the others will use the cache torch.distributed.barrier() # Load data features from cache or dataset file input_file = args.predict_file if evaluate else args.train_file cached_features_file = os.path.join( os.path.dirname(input_file), "cached_distillation_{}_{}_{}".format( "dev" if evaluate else "train", list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), ), ) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features_and_dataset = torch.load(cached_features_file) try: features, dataset, examples = ( features_and_dataset["features"], features_and_dataset["dataset"], features_and_dataset["examples"], ) except KeyError: raise DeprecationWarning( "You seem to be loading features from an older version of this script please delete the " "file %s in order for it to be created again" % cached_features_file ) else: logger.info("Creating features from dataset file at %s", input_file) processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor() if evaluate: examples = processor.get_dev_examples(args.data_dir, filename=args.predict_file) else: examples = processor.get_train_examples(args.data_dir, filename=args.train_file) features, dataset = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=not evaluate, return_dataset="pt", threads=args.threads, ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save({"features": features, "dataset": dataset, "examples": examples}, cached_features_file) if args.local_rank == 0 and not evaluate: # Make sure only the first process in distributed training process the dataset, and the others will use the cache torch.distributed.barrier() if output_examples: return dataset, examples, features return dataset def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()), ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.", ) # Distillation parameters (optional) parser.add_argument( "--teacher_type", default=None, type=str, help="Teacher type. Teacher tokenizer and student (model) tokenizer must output the same tokenization. Only for distillation.", ) parser.add_argument( "--teacher_name_or_path", default=None, type=str, help="Path to the already SQuAD fine-tuned teacher model. Only for distillation.", ) parser.add_argument( "--alpha_ce", default=0.5, type=float, help="Distillation loss linear weight. Only for distillation." ) parser.add_argument( "--alpha_squad", default=0.5, type=float, help="True SQuAD loss linear weight. Only for distillation." 
) parser.add_argument( "--temperature", default=2.0, type=float, help="Distillation temperature. Only for distillation." ) # Other parameters parser.add_argument( "--data_dir", default=None, type=str, help="The input data dir. Should contain the .json files for the task." + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--train_file", default=None, type=str, help="The input training file. If a data dir is specified, will look for the file there" + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--predict_file", default=None, type=str, help="The input evaluation file. If a data dir is specified, will look for the file there" + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3", ) parser.add_argument( "--version_2_with_negative", action="store_true", help="If true, the SQuAD examples contain some that do not have an answer.", ) parser.add_argument( "--null_score_diff_threshold", type=float, default=0.0, help="If null_score - best_non_null is greater than the threshold predict null.", ) parser.add_argument( "--max_seq_length", default=384, type=int, help="The maximum total input sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.", ) parser.add_argument( "--doc_stride", default=128, type=int, help="When splitting up a long document into chunks, how much stride to take between chunks.", ) parser.add_argument( "--max_query_length", default=64, type=int, help="The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length.", ) parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument( "--evaluate_during_training", action="store_true", help="Rul evaluation during training at each logging step." ) parser.add_argument( "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model." ) parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument( "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation." ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform." 
) parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.", ) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument( "--n_best_size", default=20, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json output file.", ) parser.add_argument( "--max_answer_length", default=30, type=int, help="The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another.", ) parser.add_argument( "--verbose_logging", action="store_true", help="If true, all of the warnings related to data processing will be printed. " "A number of warnings are expected for a normal SQuAD evaluation.", ) parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.") parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument( "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available") parser.add_argument( "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html", ) parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.") parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.") parser.add_argument("--threads", type=int, default=1, help="multiple threads for converting example to features") args = parser.parse_args() if ( os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir ): raise ValueError( "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( args.output_dir ) ) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: # Make sure only the first process in distributed training will download model & vocab torch.distributed.barrier() args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None, ) tokenizer = tokenizer_class.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None, ) model = model_class.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None, ) if args.teacher_type is not None: assert args.teacher_name_or_path is not None assert args.alpha_ce > 0.0 assert args.alpha_ce + args.alpha_squad > 0.0 assert args.teacher_type != "distilbert", "We constraint teachers not to be of type DistilBERT." teacher_config_class, teacher_model_class, _ = MODEL_CLASSES[args.teacher_type] teacher_config = teacher_config_class.from_pretrained( args.teacher_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None ) teacher = teacher_model_class.from_pretrained( args.teacher_name_or_path, config=teacher_config, cache_dir=args.cache_dir if args.cache_dir else None ) teacher.to(args.device) else: teacher = None if args.local_rank == 0: # Make sure only the first process in distributed training will download model & vocab torch.distributed.barrier() model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set. # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will # remove the need for this code, but it is still valid. 
if args.fp16: try: import apex apex.amp.register_half_function(torch, "einsum") except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") # Training if args.do_train: train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer, teacher=teacher) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Save the trained model and the tokenizer if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) model.to(args.device) # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory results = {} if args.do_eval and args.local_rank in [-1, 0]: if args.do_train: logger.info("Loading checkpoints saved during training for evaluation") checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) ) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: # Reload the model global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) # Evaluate result = evaluate(args, model, tokenizer, prefix=global_step) result = dict((k + ("_{}".format(global_step) if global_step else ""), v) for k, v in result.items()) results.update(result) logger.info("Results: {}".format(results)) return results if __name__ == "__main__": main()
35,797
40.529002
162
py
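The distillation-specific part of run_squad_w_distillation.py above is the KL-divergence term between temperature-softened student and teacher logits, scaled by temperature**2 and mixed with the plain SQuAD loss via alpha_ce / alpha_squad. A minimal standalone version of that loss on random start-position logits (no real models; only the start term is shown, where the script averages the start and end terms):

import torch
import torch.nn as nn
import torch.nn.functional as F

temperature, alpha_ce, alpha_squad = 2.0, 0.5, 0.5   # the script's argparse defaults

# Random start-position logits for a batch of 4, sequence length 384.
start_logits_stu = torch.randn(4, 384)
start_logits_tea = torch.randn(4, 384)
squad_loss = torch.tensor(1.3)   # stand-in for the student's own QA loss

loss_fct = nn.KLDivLoss(reduction="batchmean")
loss_start = loss_fct(
    F.log_softmax(start_logits_stu / temperature, dim=-1),
    F.softmax(start_logits_tea / temperature, dim=-1),
) * (temperature ** 2)

loss = alpha_ce * loss_start + alpha_squad * squad_loss
print(loss.item())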
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/distillation/train.py
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Training the distilled model. Supported architectures include: BERT -> DistilBERT, RoBERTa -> DistilRoBERTa, GPT2 -> DistilGPT2. """ import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPT2Config, GPT2LMHeadModel, GPT2Tokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed MODEL_CLASSES = { "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), "bert": (BertConfig, BertForMaskedLM, BertTokenizer), "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer), } def sanity_checks(args): """ A bunch of args sanity checks to perform even starting... """ assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def freeze_pos_embeddings(student, args): if args.student_type == "roberta": student.roberta.embeddings.position_embeddings.weight.requires_grad = False elif args.student_type == "gpt2": student.transformer.wpe.weight.requires_grad = False def freeze_token_type_embeddings(student, args): if args.student_type == "roberta": student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False def main(): parser = argparse.ArgumentParser(description="Training") parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.") parser.add_argument( "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)" ) parser.add_argument( "--data_file", type=str, required=True, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.", ) parser.add_argument( "--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True, help="The student 
type (DistilBERT, RoBERTa).", ) parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.") parser.add_argument( "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint." ) parser.add_argument( "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)." ) parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.") parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.") parser.add_argument( "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0." ) parser.add_argument( "--alpha_mlm", default=0.0, type=float, help="Linear weight for the MLM loss. Must be >=0. Should be used in coonjunction with `mlm` flag.", ) parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.") parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.") parser.add_argument( "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0." ) parser.add_argument( "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." ) parser.add_argument( "--mlm_mask_prop", default=0.15, type=float, help="Proportion of tokens for which we need to make a prediction.", ) parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.") parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.") parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.") parser.add_argument( "--mlm_smoothing", default=0.7, type=float, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).", ) parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.") parser.add_argument( "--restrict_ce_to_mask", action="store_true", help="If true, compute the distilation loss only the [MLM] prediction distribution.", ) parser.add_argument( "--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.", ) parser.add_argument( "--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.", ) parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.") parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).") parser.add_argument( "--group_by_size", action="store_false", help="If true, group sequences that have similar length into the same batch. 
Default is true.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches.", ) parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.") parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.") parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html", ) parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.") parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank") parser.add_argument("--seed", type=int, default=56, help="Random seed") parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.") parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.") args = parser.parse_args() sanity_checks(args) # ARGS # init_gpu_params(args) set_seed(args) if args.is_master: if os.path.exists(args.dump_path): if not args.force: raise ValueError( f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite it" "Use `--force` if you want to overwrite it" ) else: shutil.rmtree(args.dump_path) if not os.path.exists(args.dump_path): os.makedirs(args.dump_path) logger.info(f"Experiment will be dumped and logged in {args.dump_path}") # SAVE PARAMS # logger.info(f"Param: {args}") with open(os.path.join(args.dump_path, "parameters.json"), "w") as f: json.dump(vars(args), f, indent=4) git_log(args.dump_path) student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type] teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type] # TOKENIZER # tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name) special_tok_ids = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): idx = tokenizer.all_special_tokens.index(tok_symbol) special_tok_ids[tok_name] = tokenizer.all_special_ids[idx] logger.info(f"Special tokens {special_tok_ids}") args.special_tok_ids = special_tok_ids args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f"Loading data from {args.data_file}") with open(args.data_file, "rb") as fp: data = pickle.load(fp) if args.mlm: logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)") with open(args.token_counts, "rb") as fp: counts = pickle.load(fp) token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing for idx in special_tok_ids.values(): token_probs[idx] = 0.0 # do not predict special tokens token_probs = torch.from_numpy(token_probs) else: token_probs = None train_lm_seq_dataset = LmSeqsDataset(params=args, data=data) logger.info("Data loader created.") # STUDENT # logger.info(f"Loading student config from 
{args.student_config}") stu_architecture_config = student_config_class.from_pretrained(args.student_config) stu_architecture_config.output_hidden_states = True if args.student_pretrained_weights is not None: logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}") student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config) else: student = student_model_class(stu_architecture_config) if args.n_gpu > 0: student.to(f"cuda:{args.local_rank}") logger.info("Student loaded.") # TEACHER # teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True) if args.n_gpu > 0: teacher.to(f"cuda:{args.local_rank}") logger.info(f"Teacher loaded from {args.teacher_name}.") # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(student, args) if args.freeze_token_type_embds: freeze_token_type_embeddings(student, args) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() distiller = Distiller( params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher ) distiller.train() logger.info("Let's go get some drinks.") if __name__ == "__main__": main()
12,975
39.173375
122
py
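A quick way to read the coupled assertions in sanity_checks() from the train.py row above is as a whitelist of (student_type, teacher_type) pairs. The sketch below is illustrative only and not part of the original file; it simply restates those type assertions as plain Python.

# Restating the student/teacher pairings accepted by sanity_checks() in train.py (illustrative only).
VALID_MLM_PAIRS = {("distilbert", "bert"), ("roberta", "roberta")}  # --mlm set
VALID_CLM_PAIRS = {("gpt2", "gpt2")}                                # CLM, no --mlm

def is_valid_pair(student_type, teacher_type, mlm):
    pairs = VALID_MLM_PAIRS if mlm else VALID_CLM_PAIRS
    return (student_type, teacher_type) in pairs

assert is_valid_pair("distilbert", "bert", mlm=True)
assert is_valid_pair("gpt2", "gpt2", mlm=False)
assert not is_valid_pair("roberta", "bert", mlm=True)  # rejected: types differ and it is not the BERT -> DistilBERT case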
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/distillation/distiller.py
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The distiller to distil the student. Adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) """ import math import os import time import psutil import torch import torch.nn as nn import torch.nn.functional as F from torch.optim import AdamW from torch.utils.data import BatchSampler, DataLoader, RandomSampler from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm from grouped_batch_sampler import GroupedBatchSampler, create_lengths_groups from lm_seqs_dataset import LmSeqsDataset from transformers import get_linear_schedule_with_warmup from utils import logger try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter class Distiller: def __init__( self, params: dict, dataset: LmSeqsDataset, token_probs: torch.tensor, student: nn.Module, teacher: nn.Module ): logger.info("Initializing Distiller") self.params = params self.dump_path = params.dump_path self.multi_gpu = params.multi_gpu self.fp16 = params.fp16 self.student = student self.teacher = teacher self.student_config = student.config self.vocab_size = student.config.vocab_size if params.n_gpu <= 1: sampler = RandomSampler(dataset) else: sampler = DistributedSampler(dataset) if params.group_by_size: groups = create_lengths_groups(lengths=dataset.lengths, k=params.max_model_input_size) sampler = GroupedBatchSampler(sampler=sampler, group_ids=groups, batch_size=params.batch_size) else: sampler = BatchSampler(sampler=sampler, batch_size=params.batch_size, drop_last=False) self.dataloader = DataLoader(dataset=dataset, batch_sampler=sampler, collate_fn=dataset.batch_sequences) self.temperature = params.temperature assert self.temperature > 0.0 self.alpha_ce = params.alpha_ce self.alpha_mlm = params.alpha_mlm self.alpha_clm = params.alpha_clm self.alpha_mse = params.alpha_mse self.alpha_cos = params.alpha_cos self.mlm = params.mlm if self.mlm: logger.info("Using MLM loss for LM step.") self.mlm_mask_prop = params.mlm_mask_prop assert 0.0 <= self.mlm_mask_prop <= 1.0 assert params.word_mask + params.word_keep + params.word_rand == 1.0 self.pred_probs = torch.FloatTensor([params.word_mask, params.word_keep, params.word_rand]) self.pred_probs = self.pred_probs.to(f"cuda:{params.local_rank}") if params.n_gpu > 0 else self.pred_probs self.token_probs = token_probs.to(f"cuda:{params.local_rank}") if params.n_gpu > 0 else token_probs if self.fp16: self.pred_probs = self.pred_probs.half() self.token_probs = self.token_probs.half() else: logger.info("Using CLM loss for LM step.") self.epoch = 0 self.n_iter = 0 self.n_total_iter = 0 self.n_sequences_epoch = 0 self.total_loss_epoch = 0 self.last_loss = 0 self.last_loss_ce = 0 self.last_loss_mlm = 0 self.last_loss_clm = 0 if self.alpha_mse > 0.0: self.last_loss_mse = 0 if self.alpha_cos > 0.0: self.last_loss_cos = 0 self.last_log = 0 self.ce_loss_fct = 
nn.KLDivLoss(reduction="batchmean") self.lm_loss_fct = nn.CrossEntropyLoss(ignore_index=-100) if self.alpha_mse > 0.0: self.mse_loss_fct = nn.MSELoss(reduction="sum") if self.alpha_cos > 0.0: self.cosine_loss_fct = nn.CosineEmbeddingLoss(reduction="mean") logger.info("--- Initializing model optimizer") assert params.gradient_accumulation_steps >= 1 self.num_steps_epoch = len(self.dataloader) num_train_optimization_steps = ( int(self.num_steps_epoch / params.gradient_accumulation_steps * params.n_epoch) + 1 ) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in student.named_parameters() if not any(nd in n for nd in no_decay) and p.requires_grad ], "weight_decay": params.weight_decay, }, { "params": [ p for n, p in student.named_parameters() if any(nd in n for nd in no_decay) and p.requires_grad ], "weight_decay": 0.0, }, ] logger.info( "------ Number of trainable parameters (student): %i" % sum([p.numel() for p in self.student.parameters() if p.requires_grad]) ) logger.info("------ Number of parameters (student): %i" % sum([p.numel() for p in self.student.parameters()])) self.optimizer = AdamW( optimizer_grouped_parameters, lr=params.learning_rate, eps=params.adam_epsilon, betas=(0.9, 0.98) ) warmup_steps = math.ceil(num_train_optimization_steps * params.warmup_prop) self.scheduler = get_linear_schedule_with_warmup( self.optimizer, num_warmup_steps=warmup_steps, num_training_steps=num_train_optimization_steps ) if self.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") logger.info(f"Using fp16 training: {self.params.fp16_opt_level} level") self.student, self.optimizer = amp.initialize( self.student, self.optimizer, opt_level=self.params.fp16_opt_level ) self.teacher = self.teacher.half() if self.multi_gpu: if self.fp16: from apex.parallel import DistributedDataParallel logger.info("Using apex.parallel.DistributedDataParallel for distributed training.") self.student = DistributedDataParallel(self.student) else: from torch.nn.parallel import DistributedDataParallel logger.info("Using nn.parallel.DistributedDataParallel for distributed training.") self.student = DistributedDataParallel( self.student, device_ids=[params.local_rank], output_device=params.local_rank, find_unused_parameters=True, ) self.is_master = params.is_master if self.is_master: logger.info("--- Initializing Tensorboard") self.tensorboard = SummaryWriter(log_dir=os.path.join(self.dump_path, "log", "train")) self.tensorboard.add_text(tag="config/training", text_string=str(self.params), global_step=0) self.tensorboard.add_text(tag="config/student", text_string=str(self.student_config), global_step=0) def prepare_batch_mlm(self, batch): """ Prepare the batch: from the token_ids and the lenghts, compute the attention mask and the masked label for MLM. Input: ------ batch: `Tuple` token_ids: `torch.tensor(bs, seq_length)` - The token ids for each of the sequence. It is padded. lengths: `torch.tensor(bs)` - The lengths of each of the sequences in the batch. Output: ------- token_ids: `torch.tensor(bs, seq_length)` - The token ids after the modifications for MLM. attn_mask: `torch.tensor(bs, seq_length)` - The attention mask for the self-attention. mlm_labels: `torch.tensor(bs, seq_length)` - The masked languge modeling labels. There is a -100 where there is nothing to predict. 
""" token_ids, lengths = batch token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths) assert token_ids.size(0) == lengths.size(0) attn_mask = torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None] bs, max_seq_len = token_ids.size() mlm_labels = token_ids.new(token_ids.size()).copy_(token_ids) x_prob = self.token_probs[token_ids.flatten()] n_tgt = math.ceil(self.mlm_mask_prop * lengths.sum().item()) tgt_ids = torch.multinomial(x_prob / x_prob.sum(), n_tgt, replacement=False) pred_mask = torch.zeros( bs * max_seq_len, dtype=torch.bool, device=token_ids.device ) # previously `dtype=torch.uint8`, cf pytorch 1.2.0 compatibility pred_mask[tgt_ids] = 1 pred_mask = pred_mask.view(bs, max_seq_len) pred_mask[token_ids == self.params.special_tok_ids["pad_token"]] = 0 # mask a number of words == 0 [8] (faster with fp16) if self.fp16: n1 = pred_mask.sum().item() if n1 > 8: pred_mask = pred_mask.view(-1) n2 = max(n1 % 8, 8 * (n1 // 8)) if n2 != n1: pred_mask[torch.nonzero(pred_mask).view(-1)[: n1 - n2]] = 0 pred_mask = pred_mask.view(bs, max_seq_len) assert pred_mask.sum().item() % 8 == 0, pred_mask.sum().item() _token_ids_real = token_ids[pred_mask] _token_ids_rand = _token_ids_real.clone().random_(self.vocab_size) _token_ids_mask = _token_ids_real.clone().fill_(self.params.special_tok_ids["mask_token"]) probs = torch.multinomial(self.pred_probs, len(_token_ids_real), replacement=True) _token_ids = ( _token_ids_mask * (probs == 0).long() + _token_ids_real * (probs == 1).long() + _token_ids_rand * (probs == 2).long() ) token_ids = token_ids.masked_scatter(pred_mask, _token_ids) mlm_labels[~pred_mask] = -100 # previously `mlm_labels[1-pred_mask] = -1`, cf pytorch 1.2.0 compatibility # sanity checks assert 0 <= token_ids.min() <= token_ids.max() < self.vocab_size return token_ids, attn_mask, mlm_labels def prepare_batch_clm(self, batch): """ Prepare the batch: from the token_ids and the lenghts, compute the attention mask and the labels for CLM. Input: ------ batch: `Tuple` token_ids: `torch.tensor(bs, seq_length)` - The token ids for each of the sequence. It is padded. lengths: `torch.tensor(bs)` - The lengths of each of the sequences in the batch. Output: ------- token_ids: `torch.tensor(bs, seq_length)` - The token ids after the modifications for MLM. attn_mask: `torch.tensor(bs, seq_length)` - The attention mask for the self-attention. clm_labels: `torch.tensor(bs, seq_length)` - The causal languge modeling labels. There is a -100 where there is nothing to predict. """ token_ids, lengths = batch token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths) assert token_ids.size(0) == lengths.size(0) attn_mask = torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None] clm_labels = token_ids.new(token_ids.size()).copy_(token_ids) clm_labels[~attn_mask] = -100 # previously `clm_labels[1-attn_mask] = -1`, cf pytorch 1.2.0 compatibility # sanity checks assert 0 <= token_ids.min() <= token_ids.max() < self.vocab_size return token_ids, attn_mask, clm_labels def round_batch(self, x: torch.tensor, lengths: torch.tensor): """ For float16 only. Sub-sample sentences in a batch, and add padding, so that each dimension is a multiple of 8. Input: ------ x: `torch.tensor(bs, seq_length)` - The token ids. lengths: `torch.tensor(bs, seq_length)` - The lengths of each of the sequence in the batch. Output: ------- x: `torch.tensor(new_bs, new_seq_length)` - The updated token ids. 
lengths: `torch.tensor(new_bs, new_seq_length)` - The updated lengths. """ if not self.fp16 or len(lengths) < 8: return x, lengths # number of sentences == 0 [8] bs1 = len(lengths) bs2 = 8 * (bs1 // 8) assert bs2 > 0 and bs2 % 8 == 0 if bs1 != bs2: idx = torch.randperm(bs1)[:bs2] lengths = lengths[idx] slen = lengths.max().item() x = x[idx, :slen] else: idx = None # sequence length == 0 [8] ml1 = x.size(1) if ml1 % 8 != 0: pad = 8 - (ml1 % 8) ml2 = ml1 + pad if self.mlm: pad_id = self.params.special_tok_ids["pad_token"] else: pad_id = self.params.special_tok_ids["unk_token"] padding_tensor = torch.zeros(bs2, pad, dtype=torch.long, device=x.device).fill_(pad_id) x = torch.cat([x, padding_tensor], 1) assert x.size() == (bs2, ml2) assert x.size(0) % 8 == 0 assert x.size(1) % 8 == 0 return x, lengths def train(self): """ The real training loop. """ if self.is_master: logger.info("Starting training") self.last_log = time.time() self.student.train() self.teacher.eval() for _ in range(self.params.n_epoch): if self.is_master: logger.info(f"--- Starting epoch {self.epoch}/{self.params.n_epoch-1}") if self.multi_gpu: torch.distributed.barrier() iter_bar = tqdm(self.dataloader, desc="-Iter", disable=self.params.local_rank not in [-1, 0]) for batch in iter_bar: if self.params.n_gpu > 0: batch = tuple(t.to(f"cuda:{self.params.local_rank}") for t in batch) if self.mlm: token_ids, attn_mask, lm_labels = self.prepare_batch_mlm(batch=batch) else: token_ids, attn_mask, lm_labels = self.prepare_batch_clm(batch=batch) self.step(input_ids=token_ids, attention_mask=attn_mask, lm_labels=lm_labels) iter_bar.update() iter_bar.set_postfix( {"Last_loss": f"{self.last_loss:.2f}", "Avg_cum_loss": f"{self.total_loss_epoch/self.n_iter:.2f}"} ) iter_bar.close() if self.is_master: logger.info(f"--- Ending epoch {self.epoch}/{self.params.n_epoch-1}") self.end_epoch() if self.is_master: logger.info("Save very last checkpoint as `pytorch_model.bin`.") self.save_checkpoint(checkpoint_name="pytorch_model.bin") logger.info("Training is finished") def step(self, input_ids: torch.tensor, attention_mask: torch.tensor, lm_labels: torch.tensor): """ One optimization step: forward of student AND teacher, backward on the loss (for gradient accumulation), and possibly a parameter update (depending on the gradient accumulation). Input: ------ input_ids: `torch.tensor(bs, seq_length)` - The token ids. attention_mask: `torch.tensor(bs, seq_length)` - The attention mask for self attention. lm_labels: `torch.tensor(bs, seq_length)` - The language modeling labels (mlm labels for MLM and clm labels for CLM). 
""" if self.mlm: s_logits, s_hidden_states = self.student( input_ids=input_ids, attention_mask=attention_mask ) # (bs, seq_length, voc_size) with torch.no_grad(): t_logits, t_hidden_states = self.teacher( input_ids=input_ids, attention_mask=attention_mask ) # (bs, seq_length, voc_size) else: s_logits, _, s_hidden_states = self.student( input_ids=input_ids, attention_mask=None ) # (bs, seq_length, voc_size) with torch.no_grad(): t_logits, _, t_hidden_states = self.teacher( input_ids=input_ids, attention_mask=None ) # (bs, seq_length, voc_size) assert s_logits.size() == t_logits.size() # https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py#L100 # https://github.com/peterliht/knowledge-distillation-pytorch/issues/2 if self.params.restrict_ce_to_mask: mask = (lm_labels > -1).unsqueeze(-1).expand_as(s_logits) # (bs, seq_lenth, voc_size) else: mask = attention_mask.unsqueeze(-1).expand_as(s_logits) # (bs, seq_lenth, voc_size) s_logits_slct = torch.masked_select(s_logits, mask) # (bs * seq_length * voc_size) modulo the 1s in mask s_logits_slct = s_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask t_logits_slct = torch.masked_select(t_logits, mask) # (bs * seq_length * voc_size) modulo the 1s in mask t_logits_slct = t_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask assert t_logits_slct.size() == s_logits_slct.size() loss_ce = ( self.ce_loss_fct( F.log_softmax(s_logits_slct / self.temperature, dim=-1), F.softmax(t_logits_slct / self.temperature, dim=-1), ) * (self.temperature) ** 2 ) loss = self.alpha_ce * loss_ce if self.alpha_mlm > 0.0: loss_mlm = self.lm_loss_fct(s_logits.view(-1, s_logits.size(-1)), lm_labels.view(-1)) loss += self.alpha_mlm * loss_mlm if self.alpha_clm > 0.0: shift_logits = s_logits[..., :-1, :].contiguous() shift_labels = lm_labels[..., 1:].contiguous() loss_clm = self.lm_loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) loss += self.alpha_clm * loss_clm if self.alpha_mse > 0.0: loss_mse = self.mse_loss_fct(s_logits_slct, t_logits_slct) / s_logits_slct.size( 0 ) # Reproducing batchmean reduction loss += self.alpha_mse * loss_mse if self.alpha_cos > 0.0: s_hidden_states = s_hidden_states[-1] # (bs, seq_length, dim) t_hidden_states = t_hidden_states[-1] # (bs, seq_length, dim) mask = attention_mask.unsqueeze(-1).expand_as(s_hidden_states) # (bs, seq_length, dim) assert s_hidden_states.size() == t_hidden_states.size() dim = s_hidden_states.size(-1) s_hidden_states_slct = torch.masked_select(s_hidden_states, mask) # (bs * seq_length * dim) s_hidden_states_slct = s_hidden_states_slct.view(-1, dim) # (bs * seq_length, dim) t_hidden_states_slct = torch.masked_select(t_hidden_states, mask) # (bs * seq_length * dim) t_hidden_states_slct = t_hidden_states_slct.view(-1, dim) # (bs * seq_length, dim) target = s_hidden_states_slct.new(s_hidden_states_slct.size(0)).fill_(1) # (bs * seq_length,) loss_cos = self.cosine_loss_fct(s_hidden_states_slct, t_hidden_states_slct, target) loss += self.alpha_cos * loss_cos self.total_loss_epoch += loss.item() self.last_loss = loss.item() self.last_loss_ce = loss_ce.item() if self.alpha_mlm > 0.0: self.last_loss_mlm = loss_mlm.item() if self.alpha_clm > 0.0: self.last_loss_clm = loss_clm.item() if self.alpha_mse > 0.0: self.last_loss_mse = loss_mse.item() if self.alpha_cos > 0.0: self.last_loss_cos = loss_cos.item() self.optimize(loss) self.n_sequences_epoch += input_ids.size(0) def optimize(self, loss): """ 
Normalization on the loss (gradient accumulation or distributed training), followed by backward pass on the loss, possibly followed by a parameter update (depending on the gradient accumulation). Also update the metrics for tensorboard. """ # Check for NaN if (loss != loss).data.any(): logger.error("NaN detected") exit() if self.multi_gpu: loss = loss.mean() if self.params.gradient_accumulation_steps > 1: loss = loss / self.params.gradient_accumulation_steps if self.fp16: from apex import amp with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() self.iter() if self.n_iter % self.params.gradient_accumulation_steps == 0: if self.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.params.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(self.student.parameters(), self.params.max_grad_norm) self.optimizer.step() self.optimizer.zero_grad() self.scheduler.step() def iter(self): """ Update global counts, write to tensorboard and save checkpoint. """ self.n_iter += 1 self.n_total_iter += 1 if self.n_total_iter % self.params.log_interval == 0: self.log_tensorboard() self.last_log = time.time() if self.n_total_iter % self.params.checkpoint_interval == 0: self.save_checkpoint() def log_tensorboard(self): """ Log into tensorboard. Only by the master process. """ if not self.is_master: return for param_name, param in self.student.named_parameters(): self.tensorboard.add_scalar( tag="parameter_mean/" + param_name, scalar_value=param.data.mean(), global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="parameter_std/" + param_name, scalar_value=param.data.std(), global_step=self.n_total_iter ) if param.grad is None: continue self.tensorboard.add_scalar( tag="grad_mean/" + param_name, scalar_value=param.grad.data.mean(), global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="grad_std/" + param_name, scalar_value=param.grad.data.std(), global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="losses/cum_avg_loss_epoch", scalar_value=self.total_loss_epoch / self.n_iter, global_step=self.n_total_iter, ) self.tensorboard.add_scalar(tag="losses/loss", scalar_value=self.last_loss, global_step=self.n_total_iter) self.tensorboard.add_scalar( tag="losses/loss_ce", scalar_value=self.last_loss_ce, global_step=self.n_total_iter ) if self.alpha_mlm > 0.0: self.tensorboard.add_scalar( tag="losses/loss_mlm", scalar_value=self.last_loss_mlm, global_step=self.n_total_iter ) if self.alpha_clm > 0.0: self.tensorboard.add_scalar( tag="losses/loss_clm", scalar_value=self.last_loss_clm, global_step=self.n_total_iter ) if self.alpha_mse > 0.0: self.tensorboard.add_scalar( tag="losses/loss_mse", scalar_value=self.last_loss_mse, global_step=self.n_total_iter ) if self.alpha_cos > 0.0: self.tensorboard.add_scalar( tag="losses/loss_cos", scalar_value=self.last_loss_cos, global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="learning_rate/lr", scalar_value=self.scheduler.get_lr()[0], global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="global/memory_usage", scalar_value=psutil.virtual_memory()._asdict()["used"] / 1_000_000, global_step=self.n_total_iter, ) self.tensorboard.add_scalar( tag="global/speed", scalar_value=time.time() - self.last_log, global_step=self.n_total_iter ) def end_epoch(self): """ Finally arrived at the end of epoch (full pass on dataset). Do some tensorboard logging and checkpoint saving. 
""" logger.info(f"{self.n_sequences_epoch} sequences have been trained during this epoch.") if self.is_master: self.save_checkpoint(checkpoint_name=f"model_epoch_{self.epoch}.pth") self.tensorboard.add_scalar( tag="epoch/loss", scalar_value=self.total_loss_epoch / self.n_iter, global_step=self.epoch ) self.epoch += 1 self.n_sequences_epoch = 0 self.n_iter = 0 self.total_loss_epoch = 0 def save_checkpoint(self, checkpoint_name: str = "checkpoint.pth"): """ Save the current state. Only by the master process. """ if not self.is_master: return mdl_to_save = self.student.module if hasattr(self.student, "module") else self.student mdl_to_save.config.save_pretrained(self.dump_path) state_dict = mdl_to_save.state_dict() torch.save(state_dict, os.path.join(self.dump_path, checkpoint_name))
26,135
42.271523
143
py
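The core soft-target objective in Distiller.step() from the distiller.py row above is a KL divergence between temperature-softened student and teacher distributions, rescaled by the squared temperature. The following minimal, self-contained sketch reproduces just that term; the logits and their shape are random placeholders, not values from the original code.

# Illustrative re-computation of the loss_ce term from Distiller.step(); logits are random placeholders.
import torch
import torch.nn as nn
import torch.nn.functional as F

temperature = 2.0
ce_loss_fct = nn.KLDivLoss(reduction="batchmean")
s_logits = torch.randn(4, 1000)  # student logits at the selected positions (made-up shape)
t_logits = torch.randn(4, 1000)  # teacher logits at the same positions
loss_ce = ce_loss_fct(
    F.log_softmax(s_logits / temperature, dim=-1),
    F.softmax(t_logits / temperature, dim=-1),
) * temperature ** 2
print(float(loss_ce))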
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/distillation/scripts/extract.py
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Preprocessing script before training the distilled model. Specific to RoBERTa -> DistilRoBERTa and GPT2 -> DistilGPT2. """ import argparse import torch from transformers import GPT2LMHeadModel, RobertaForMaskedLM if __name__ == "__main__": parser = argparse.ArgumentParser( description="Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned Distillation" ) parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"]) parser.add_argument("--model_name", default="roberta-large", type=str) parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str) parser.add_argument("--vocab_transform", action="store_true") args = parser.parse_args() if args.model_type == "roberta": model = RobertaForMaskedLM.from_pretrained(args.model_name) prefix = "roberta" elif args.model_type == "gpt2": model = GPT2LMHeadModel.from_pretrained(args.model_name) prefix = "transformer" state_dict = model.state_dict() compressed_sd = {} # Embeddings # if args.model_type == "gpt2": for param_name in ["wte.weight", "wpe.weight"]: compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"] else: for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]: param_name = f"{prefix}.embeddings.{w}.weight" compressed_sd[param_name] = state_dict[param_name] for w in ["weight", "bias"]: param_name = f"{prefix}.embeddings.LayerNorm.{w}" compressed_sd[param_name] = state_dict[param_name] # Transformer Blocks # std_idx = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: if args.model_type == "gpt2": for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]: for w in ["weight", "bias"]: compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[ f"{prefix}.h.{teacher_idx}.{layer}.{w}" ] compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"] else: for layer in [ "attention.self.query", "attention.self.key", "attention.self.value", "attention.output.dense", "attention.output.LayerNorm", "intermediate.dense", "output.dense", "output.LayerNorm", ]: for w in ["weight", "bias"]: compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}" ] std_idx += 1 # Language Modeling Head ###s if args.model_type == "roberta": for layer in ["lm_head.decoder.weight", "lm_head.bias"]: compressed_sd[f"{layer}"] = state_dict[f"{layer}"] if args.vocab_transform: for w in ["weight", "bias"]: compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"] compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"] elif args.model_type == "gpt2": for w in ["weight", "bias"]: compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"] compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"] print(f"N layers selected for distillation: 
{std_idx}") print(f"Number of params transfered for distillation: {len(compressed_sd.keys())}") print(f"Save transfered checkpoint to {args.dump_checkpoint}.") torch.save(compressed_sd, args.dump_checkpoint)
4,448
42.194175
128
py
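The extract.py row above initializes a 6-layer student by copying a fixed subset of teacher layers into renumbered slots. The sketch below restates only that index remapping; the key names follow the RoBERTa branch of the script and are illustrative.

# Teacher layers kept by extract.py, renumbered 0..5 in the student checkpoint (illustrative restatement).
TEACHER_LAYERS = [0, 2, 4, 7, 9, 11]
student_to_teacher = {
    f"roberta.encoder.layer.{std_idx}": f"roberta.encoder.layer.{teacher_idx}"
    for std_idx, teacher_idx in enumerate(TEACHER_LAYERS)
}
print(student_to_teacher)  # e.g. student layer 5 is initialized from teacher layer 11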
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/distillation/scripts/extract_distilbert.py
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Preprocessing script before training DistilBERT. Specific to BERT -> DistilBERT. """ import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": parser = argparse.ArgumentParser( description="Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned Distillation" ) parser.add_argument("--model_type", default="bert", choices=["bert"]) parser.add_argument("--model_name", default="bert-base-uncased", type=str) parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str) parser.add_argument("--vocab_transform", action="store_true") args = parser.parse_args() if args.model_type == "bert": model = BertForMaskedLM.from_pretrained(args.model_name) prefix = "bert" else: raise ValueError('args.model_type should be "bert".') state_dict = model.state_dict() compressed_sd = {} for w in ["word_embeddings", "position_embeddings"]: compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"] for w in ["weight", "bias"]: compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"] std_idx = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}" ] compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}" ] compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}" ] compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}" ] compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}" ] compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}" ] compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}" ] compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}" ] std_idx += 1 compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"] compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"] if args.vocab_transform: for w in ["weight", "bias"]: compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"] compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"] print(f"N 
layers selected for distillation: {std_idx}") print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}") print(f"Save transferred checkpoint to {args.dump_checkpoint}.") torch.save(compressed_sd, args.dump_checkpoint)
4,314
45.397849
128
py
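extract_distilbert.py in the row above performs the same layer selection but additionally renames BERT parameter groups to their DistilBERT equivalents. The table below summarizes that renaming as plain data (DistilBERT student names on the left, BERT teacher names on the right); the layer indices N and M in the print are placeholders.

# Per-layer parameter-name translation used by extract_distilbert.py (student -> teacher), restated as a dict.
NAME_MAP = {
    "attention.q_lin": "attention.self.query",
    "attention.k_lin": "attention.self.key",
    "attention.v_lin": "attention.self.value",
    "attention.out_lin": "attention.output.dense",
    "sa_layer_norm": "attention.output.LayerNorm",
    "ffn.lin1": "intermediate.dense",
    "ffn.lin2": "output.dense",
    "output_layer_norm": "output.LayerNorm",
}
for student_part, teacher_part in NAME_MAP.items():
    print(f"distilbert.transformer.layer.N.{student_part}  <-  bert.encoder.layer.M.{teacher_part}")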
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/bert-loses-patience/run_glue_with_pabee.py
# coding=utf-8 # Copyright 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and Microsoft Corporation. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Training and inference using the library models for sequence classification on GLUE (Bert, Albert) with PABEE.""" import argparse import glob import json import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from pabee.modeling_pabee_albert import AlbertForSequenceClassificationWithPabee from pabee.modeling_pabee_bert import BertForSequenceClassificationWithPabee from transformers import ( WEIGHTS_NAME, AdamW, AlbertConfig, AlbertTokenizer, BertConfig, BertTokenizer, get_linear_schedule_with_warmup, ) from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes as output_modes from transformers import glue_processors as processors try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter logger = logging.getLogger(__name__) MODEL_CLASSES = { "bert": (BertConfig, BertForSequenceClassificationWithPabee, BertTokenizer), "albert": (AlbertConfig, AlbertForSequenceClassificationWithPabee, AlbertTokenizer), } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def train(args, train_dataset, model, tokenizer): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # Check if saved optimizer or scheduler states exist if 
os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile( os.path.join(args.model_name_or_path, "scheduler.pt") ): # Load in optimizer and scheduler states optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True, ) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 epochs_trained = 0 steps_trained_in_current_epoch = 0 # Check if continuing training from a checkpoint if os.path.exists(args.model_name_or_path): # set global_step to gobal_step of last saved checkpoint from model path global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0]) epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps) logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", global_step) logger.info( " Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch, ) tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0], ) set_seed(args) # Added here for reproductibility for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue model.train() batch = tuple(t.to(args.device) for t in batch) inputs = { "input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3], } inputs["token_type_ids"] = batch[2] outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 
1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: logs = {} if ( args.local_rank == -1 and args.evaluate_during_training ): # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): eval_key = "eval_{}".format(key) logs[eval_key] = value loss_scalar = (tr_loss - logging_loss) / args.logging_steps learning_rate_scalar = scheduler.get_lr()[0] logs["learning_rate"] = learning_rate_scalar logs["loss"] = loss_scalar logging_loss = tr_loss for key, value in logs.items(): tb_writer.add_scalar(key, value, global_step) print(json.dumps({**logs, **{"step": global_step}})) if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix="", patience=0): if args.model_type == "albert": model.albert.set_regression_threshold(args.regression_threshold) model.albert.set_patience(patience) model.albert.reset_stats() elif args.model_type == "bert": model.bert.set_regression_threshold(args.regression_threshold) model.bert.set_patience(patience) model.bert.reset_stats() else: raise NotImplementedError() # Loop to handle MNLI double evaluation (matched, mis-matched) eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,) eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,) results = {} for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu eval if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Eval! 
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds = None out_label_ids = None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = { "input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3], } inputs["token_type_ids"] = batch[2] outputs = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs["labels"].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0) eval_loss = eval_loss / nb_eval_steps if args.output_mode == "classification": preds = np.argmax(preds, axis=1) elif args.output_mode == "regression": preds = np.squeeze(preds) result = compute_metrics(eval_task, preds, out_label_ids) results.update(result) output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) print(" %s = %s" % (key, str(result[key]))) writer.write("%s = %s\n" % (key, str(result[key]))) if args.eval_all_checkpoints and patience != 0: if args.model_type == "albert": model.albert.log_stats() elif args.model_type == "bert": model.bert.log_stats() else: raise NotImplementedError() return results def load_and_cache_examples(args, task, tokenizer, evaluate=False): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache processor = processors[task]() output_mode = output_modes[task] # Load data features from cache or dataset file cached_features_file = os.path.join( args.data_dir, "cached_{}_{}_{}_{}".format( "dev" if evaluate else "train", list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), str(task), ), ) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) label_list = processor.get_labels() if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]: # HACK(label indices are swapped in RoBERTa pretrained model) label_list[1], label_list[2] = label_list[2], label_list[1] examples = ( processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir) ) features = convert_examples_to_features( examples, tokenizer, label_list=label_list, max_length=args.max_seq_length, output_mode=output_mode, ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0 and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_attention_mask = 
torch.tensor([f.attention_mask for f in features], dtype=torch.long) all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) if output_mode == "classification": all_labels = torch.tensor([f.label for f in features], dtype=torch.long) elif output_mode == "regression": all_labels = torch.tensor([f.label for f in features], dtype=torch.float) dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels) return dataset def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.", ) parser.add_argument( "--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()), ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name.", ) parser.add_argument( "--task_name", default=None, type=str, required=True, help="The name of the task to train selected in the list: " + ", ".join(processors.keys()), ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--patience", default="0", type=str, required=False, ) parser.add_argument( "--regression_threshold", default=0, type=float, required=False, ) # Other parameters parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3", ) parser.add_argument( "--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. 
Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument( "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step.", ) parser.add_argument( "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.", ) parser.add_argument( "--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.", ) parser.add_argument( "--per_gpu_eval_batch_size", default=1, type=int, help="Batch size per GPU/CPU for evaluation.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.", ) parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.", ) parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.", ) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.") parser.add_argument( "--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.", ) parser.add_argument( "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available") parser.add_argument( "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory", ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets", ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html", ) parser.add_argument( "--local_rank", type=int, default=-1, help="For distributed training: local_rank", ) parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.") parser.add_argument("--server_port", type=str, default="", help="For distant debugging.") args = parser.parse_args() if ( os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir ): raise ValueError( "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( args.output_dir ) ) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set seed set_seed(args) # Prepare GLUE task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() args.output_mode = output_modes[args.task_name] label_list = processor.get_labels() num_labels = len(label_list) if args.patience != "0" and args.per_gpu_eval_batch_size != 1: raise ValueError("The eval batch size must be 1 with PABEE inference on.") # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name, cache_dir=args.cache_dir if args.cache_dir else None, ) tokenizer = tokenizer_class.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None, ) model = model_class.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None, ) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) print("Total Model Parameters:", sum(param.numel() for param in model.parameters())) output_layers_param_num = sum(param.numel() for param in model.classifiers.parameters()) print("Output Layers Parameters:", output_layers_param_num) single_output_layer_param_num = sum(param.numel() for param in model.classifiers[0].parameters()) print( "Added Output Layers Parameters:", output_layers_param_num - single_output_layer_param_num, ) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", 
global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) tokenizer = tokenizer_class.from_pretrained(args.output_dir) model.to(args.device) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: patience_list = [int(x) for x in args.patience.split(",")] tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) ) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) print(f"Evaluation for checkpoint {prefix}") for patience in patience_list: result = evaluate(args, model, tokenizer, prefix=prefix, patience=patience) result = dict((k + "_{}".format(global_step), v) for k, v in result.items()) results.update(result) return results if __name__ == "__main__": main()
29833
41.74212
150
py
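# --- Illustrative sketch (not part of the dumped run script above) ---
# The script's --patience flag drives PABEE's early-exit rule at inference time.
# This minimal, self-contained sketch shows that stopping rule in isolation:
# layer-wise logits are simulated with random tensors; in the real model they
# come from the per-layer internal classifiers.
import torch

def pabee_exit_layer(per_layer_logits, patience):
    """Return the 1-based layer index at which PABEE would stop.

    Stops once `patience` consecutive layers agree on the predicted class;
    otherwise all layers are executed.
    """
    patient_counter = 0
    previous_pred = None
    for i, logits in enumerate(per_layer_logits):
        pred = logits.argmax(dim=-1)
        if previous_pred is not None and torch.equal(pred, previous_pred):
            patient_counter += 1
        else:
            patient_counter = 0
        previous_pred = pred
        if patient_counter == patience:
            return i + 1
    return len(per_layer_logits)

if __name__ == "__main__":
    torch.manual_seed(0)
    fake_logits = [torch.randn(1, 3) for _ in range(12)]  # 12 layers, 3 classes
    print("exit layer:", pabee_exit_layer(fake_logits, patience=3))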
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/bert-loses-patience/pabee/modeling_pabee_bert.py
# coding=utf-8 # Copyright 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and Microsoft Corporation. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BERT model with Patience-based Early Exit. """ import logging import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_callable from transformers.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEncoder, BertModel, BertPreTrainedModel, ) logger = logging.getLogger(__name__) class BertEncoderWithPabee(BertEncoder): def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None): layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer]) hidden_states = layer_outputs[0] return hidden_states @add_start_docstrings( "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.", BERT_START_DOCSTRING, ) class BertModelWithPabee(BertModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `Attention is all you need`_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration set to :obj:`True`; an :obj:`encoder_hidden_states` is expected as an input to the forward pass. .. _`Attention is all you need`: https://arxiv.org/abs/1706.03762 """ def __init__(self, config): super().__init__(config) self.encoder = BertEncoderWithPabee(config) self.init_weights() self.patience = 0 self.inference_instances_num = 0 self.inference_layers_num = 0 self.regression_threshold = 0 def set_regression_threshold(self, threshold): self.regression_threshold = threshold def set_patience(self, patience): self.patience = patience def reset_stats(self): self.inference_instances_num = 0 self.inference_layers_num = 0 def log_stats(self): avg_inf_layers = self.inference_layers_num / self.inference_instances_num message = f"*** Patience = {self.patience} Avg. 
Inference Layers = {avg_inf_layers:.2f} Speed Up = {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***" print(message) @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_dropout=None, output_layers=None, regression=False, ): r""" Return: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pre-training. This output is usually *not* a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastabe to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds ) encoder_outputs = embedding_output if self.training: res = [] for i in range(self.config.num_hidden_layers): encoder_outputs = self.encoder.adaptive_forward( encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask ) pooled_output = self.pooler(encoder_outputs) logits = output_layers[i](output_dropout(pooled_output)) res.append(logits) elif self.patience == 0: # Use all layers for inference encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, ) pooled_output = self.pooler(encoder_outputs[0]) res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)] else: patient_counter = 0 patient_result = None calculated_layer_num = 0 for i in range(self.config.num_hidden_layers): calculated_layer_num += 1 encoder_outputs = self.encoder.adaptive_forward( encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask ) pooled_output = self.pooler(encoder_outputs) logits = output_layers[i](pooled_output) if regression: labels = logits.detach() if patient_result is not None: patient_labels = patient_result.detach() if (patient_result is not None) and torch.abs(patient_result - labels) < self.regression_threshold: patient_counter += 1 else: patient_counter = 0 else: labels = logits.detach().argmax(dim=1) if patient_result is not None: patient_labels = patient_result.detach().argmax(dim=1) if (patient_result is not None) and torch.all(labels.eq(patient_labels)): patient_counter += 1 else: patient_counter = 0 patient_result = logits if patient_counter == self.patience: break res = [patient_result] self.inference_layers_num += calculated_layer_num self.inference_instances_num += 1 return res @add_start_docstrings( """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", BERT_START_DOCSTRING, ) class BertForSequenceClassificationWithPabee(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bert = BertModelWithPabee(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifiers = nn.ModuleList( [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)] ) self.init_weights() @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided): Classification (or regression if config.num_labels==1) loss. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: from transformers import BertTokenizer, BertForSequenceClassification from pabee import BertForSequenceClassificationWithPabee import torch tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForSequenceClassificationWithPabee.from_pretrained('bert-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=labels) loss, logits = outputs[:2] """ logits = self.bert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_dropout=self.dropout, output_layers=self.classifiers, regression=self.num_labels == 1, ) outputs = (logits[-1],) if labels is not None: total_loss = None total_weights = 0 for ix, logits_item in enumerate(logits): if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits_item.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1)) if total_loss is None: total_loss = loss else: total_loss += loss * (ix + 1) total_weights += ix + 1 outputs = (total_loss / total_weights,) + outputs return outputs
15265
43.507289
168
py
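# --- Hedged usage sketch for BertForSequenceClassificationWithPabee (defined above) ---
# The import path assumes the bert-loses-patience example directory is on PYTHONPATH;
# "./pabee_bert_mnli" is a placeholder for any fine-tuned checkpoint of this class.
import torch
from transformers import BertTokenizer
from pabee.modeling_pabee_bert import BertForSequenceClassificationWithPabee

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForSequenceClassificationWithPabee.from_pretrained("./pabee_bert_mnli")  # placeholder path
model.eval()

# Patience-based early exit only applies at inference time and, as the run
# script above enforces, expects an eval batch size of 1.
model.bert.set_patience(3)
model.bert.reset_stats()

inputs = tokenizer("The movie was surprisingly good.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs)[0]
print("prediction:", logits.argmax(dim=-1).item())
model.bert.log_stats()  # reports the average number of layers actually executed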
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/examples/bert-loses-patience/pabee/modeling_pabee_albert.py
# coding=utf-8 # Copyright 2020 Google AI, Google Brain, the HuggingFace Inc. team and Microsoft Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch ALBERT model with Patience-based Early Exit. """ import logging import torch import torch.nn as nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_callable from transformers.modeling_albert import ( ALBERT_INPUTS_DOCSTRING, ALBERT_START_DOCSTRING, AlbertModel, AlbertPreTrainedModel, AlbertTransformer, ) logger = logging.getLogger(__name__) class AlbertTransformerWithPabee(AlbertTransformer): def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None): if current_layer == 0: hidden_states = self.embedding_hidden_mapping_in(hidden_states) else: hidden_states = hidden_states[0] layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups) # Index of the hidden group group_idx = int(current_layer / (self.config.num_hidden_layers / self.config.num_hidden_groups)) layer_group_output = self.albert_layer_groups[group_idx]( hidden_states, attention_mask, head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group], ) hidden_states = layer_group_output[0] return (hidden_states,) @add_start_docstrings( "The bare ALBERT Model transformer with PABEE outputting raw hidden-states without any specific head on top.", ALBERT_START_DOCSTRING, ) class AlbertModelWithPabee(AlbertModel): def __init__(self, config): super().__init__(config) self.encoder = AlbertTransformerWithPabee(config) self.init_weights() self.patience = 0 self.inference_instances_num = 0 self.inference_layers_num = 0 self.regression_threshold = 0 def set_regression_threshold(self, threshold): self.regression_threshold = threshold def set_patience(self, patience): self.patience = patience def reset_stats(self): self.inference_instances_num = 0 self.inference_layers_num = 0 def log_stats(self): avg_inf_layers = self.inference_layers_num / self.inference_instances_num message = f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up = {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***" print(message) @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_dropout=None, output_layers=None, regression=False, ): r""" Return: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. 
pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pre-training. This output is usually *not* a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds ) encoder_outputs = embedding_output if self.training: res = [] for i in range(self.config.num_hidden_layers): encoder_outputs = self.encoder.adaptive_forward( encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask, ) pooled_output = self.pooler_activation(self.pooler(encoder_outputs[0][:, 0])) logits = output_layers[i](output_dropout(pooled_output)) res.append(logits) elif self.patience == 0: # Use all layers for inference encoder_outputs = self.encoder(encoder_outputs, extended_attention_mask, head_mask=head_mask) pooled_output = self.pooler_activation(self.pooler(encoder_outputs[0][:, 0])) res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)] else: patient_counter = 0 patient_result = None calculated_layer_num = 0 for i in range(self.config.num_hidden_layers): calculated_layer_num += 1 encoder_outputs = self.encoder.adaptive_forward( encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask, ) pooled_output = self.pooler_activation(self.pooler(encoder_outputs[0][:, 0])) logits = output_layers[i](pooled_output) if regression: labels = logits.detach() if patient_result is not None: patient_labels = patient_result.detach() 
if (patient_result is not None) and torch.abs(patient_result - labels) < self.regression_threshold: patient_counter += 1 else: patient_counter = 0 else: labels = logits.detach().argmax(dim=1) if patient_result is not None: patient_labels = patient_result.detach().argmax(dim=1) if (patient_result is not None) and torch.all(labels.eq(patient_labels)): patient_counter += 1 else: patient_counter = 0 patient_result = logits if patient_counter == self.patience: break res = [patient_result] self.inference_layers_num += calculated_layer_num self.inference_instances_num += 1 return res @add_start_docstrings( """Albert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, ALBERT_START_DOCSTRING, ) class AlbertForSequenceClassificationWithPabee(AlbertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.albert = AlbertModelWithPabee(config) self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifiers = nn.ModuleList( [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)] ) self.init_weights() @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`): Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ..., config.num_labels - 1]``. If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss), If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). Returns: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs: loss: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Classification (or regression if config.num_labels==1) loss. logits ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)`` Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: from transformers import AlbertTokenizer from pabee import AlbertForSequenceClassificationWithPabee import torch tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = AlbertForSequenceClassificationWithPabee.from_pretrained('albert-base-v2') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=labels) loss, logits = outputs[:2] """ logits = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_dropout=self.dropout, output_layers=self.classifiers, regression=self.num_labels == 1, ) outputs = (logits[-1],) if labels is not None: total_loss = None total_weights = 0 for ix, logits_item in enumerate(logits): if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits_item.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1)) if total_loss is None: total_loss = loss else: total_loss += loss * (ix + 1) total_weights += ix + 1 outputs = (total_loss / total_weights,) + outputs return outputs
13730
43.151125
168
py
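# --- Sketch of the layer-weighted training loss shared by the BERT and ALBERT PABEE heads above ---
# The loss of layer i (0-based) gets weight i + 1, so deeper classifiers dominate
# the average; this closed form is equivalent to the running-sum loop in the code.
# Losses are plain floats here; in the models they are per-layer CrossEntropy/MSE losses.
def pabee_weighted_loss(per_layer_losses):
    total_loss, total_weights = 0.0, 0
    for ix, loss in enumerate(per_layer_losses):
        total_loss += loss * (ix + 1)
        total_weights += ix + 1
    return total_loss / total_weights

print(pabee_weighted_loss([1.2, 0.9, 0.7, 0.5]))  # later layers weigh more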
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/src/transformers/modeling_encoder_decoder.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Classes to support Encoder-Decoder architectures """ import logging from typing import Optional from .configuration_encoder_decoder import EncoderDecoderConfig from .configuration_utils import PretrainedConfig from .modeling_utils import PreTrainedModel logger = logging.getLogger(__name__) class EncoderDecoderModel(PreTrainedModel): r""" :class:`~transformers.EncoderDecoder` is a generic model class that will be instantiated as a transformer architecture with one of the base model classes of the library as encoder and another one as decoder when created with the `AutoModel.from_pretrained(pretrained_model_name_or_path)` class method for the encoder and `AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path)` class method for the decoder. """ config_class = EncoderDecoderConfig base_model_prefix = "encoder_decoder" def __init__( self, config: Optional[PretrainedConfig] = None, encoder: Optional[PreTrainedModel] = None, decoder: Optional[PreTrainedModel] = None, ): assert config is not None or ( encoder is not None and decoder is not None ), "Either a configuration or an Encoder and a decoder has to be provided" if config is None: config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config) else: assert isinstance(config, self.config_class), "config: {} has to be of type {}".format( config, self.config_class ) # initialize with config super().__init__(config) if encoder is None: from transformers import AutoModel encoder = AutoModel.from_config(config.encoder) if decoder is None: from transformers import AutoModelForCausalLM decoder = AutoModelForCausalLM.from_config(config.decoder) self.encoder = encoder self.decoder = decoder assert ( self.encoder.get_output_embeddings() is None ), "The encoder {} should not have a LM Head. Please use a model without LM Head" def tie_weights(self): # for now no weights tying in encoder-decoder pass def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def get_input_embeddings(self): return self.encoder.get_input_embeddings() def get_output_embeddings(self): return self.decoder.get_output_embeddings() @classmethod def from_encoder_decoder_pretrained( cls, encoder_pretrained_model_name_or_path: str = None, decoder_pretrained_model_name_or_path: str = None, *model_args, **kwargs ) -> PreTrainedModel: r""" Instantiates an encoder and a decoder from one or two base classes of the library from pre-trained model checkpoints. The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train the model, you need to first set it back in training mode with `model.train()`. Params: encoder_pretrained_model_name_or_path (:obj: `str`, `optional`, defaults to `None`): information necessary to initiate the encoder. Either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. 
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/encoder``. - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. decoder_pretrained_model_name_or_path (:obj: `str`, `optional`, defaults to `None`): information necessary to initiate the decoder. Either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/decoder``. - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. model_args: (`optional`) Sequence of positional arguments: All remaning positional arguments will be passed to the underlying model's ``__init__`` method kwargs: (`optional`) Remaining dictionary of keyword arguments. Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded: Examples:: >>> from transformers import EncoderDecoderModel >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained('bert-base-uncased', 'bert-base-uncased') # initialize Bert2Bert """ kwargs_encoder = { argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_") } kwargs_decoder = { argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_") } # Load and initialize the encoder and decoder # The distinction between encoder and decoder at the model level is made # by the value of the flag `is_decoder` that we need to set correctly. 
encoder = kwargs_encoder.pop("model", None) if encoder is None: assert ( encoder_pretrained_model_name_or_path is not None ), "If `model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has to be defined" from .modeling_auto import AutoModel encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder) encoder.config.is_decoder = False decoder = kwargs_decoder.pop("model", None) if decoder is None: assert ( decoder_pretrained_model_name_or_path is not None ), "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has to be defined" from .modeling_auto import AutoModelForCausalLM if "config" not in kwargs_decoder: from transformers import AutoConfig decoder_config = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path) if decoder_config.is_decoder is False: logger.info( f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers." ) decoder_config.is_decoder = True kwargs_decoder["config"] = decoder_config if kwargs_decoder["config"].is_decoder is False: logger.warning( f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, make sure that the attribute `is_decoder` of `decoder_config` passed to `.from_encoder_decoder_pretrained(...)` is set to `True` or do not pass a `decoder_config` to `.from_encoder_decoder_pretrained(...)`" ) decoder = AutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder) return cls(encoder=encoder, decoder=decoder) def forward( self, input_ids=None, inputs_embeds=None, attention_mask=None, head_mask=None, encoder_outputs=None, decoder_input_ids=None, decoder_attention_mask=None, decoder_head_mask=None, decoder_inputs_embeds=None, labels=None, **kwargs, ): """ Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary for the encoder. Indices can be obtained using :class:`transformers.PretrainedTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Mask to avoid performing attention on padding token indices for the encoder. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. head_mask: (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`): Mask to nullify selected heads of the self-attention modules for the encoder. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. 
encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`, defaults to :obj:`None`): Tuple consists of (`last_hidden_state`, `optional`: `hidden_states`, `optional`: `attentions`) `last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`, defaults to :obj:`None`): Provide for sequence to sequence training to the decoder. Indices can be obtained using :class:`transformers.PretrainedTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`, defaults to :obj:`None`): Default behavior: generate a tensor that ignores pad tokens in decoder_input_ids. Causal mask will also be used by default. decoder_head_mask: (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`): Mask to nullify selected heads of the self-attention modules for the decoder. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Labels for computing the masked language modeling loss for the decoder. Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` kwargs: (`optional`) Remaining dictionary of keyword arguments. Keyword arguments come in two flavors: - Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function. - With a `decoder_` prefix which will be input as `**decoder_kwargs` for the decoder forward function. 
Examples:: >>> from transformers import EncoderDecoderModel, BertTokenizer >>> import torch >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained('bert-base-uncased', 'bert-base-uncased') # initialize Bert2Bert >>> # forward >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 >>> outputs = model(input_ids=input_ids, decoder_input_ids=input_ids) >>> # training >>> loss, outputs = model(input_ids=input_ids, decoder_input_ids=input_ids, labels=input_ids)[:2] >>> # generation >>> generated = model.generate(input_ids, decoder_start_token_id=model.config.decoder.pad_token_id) """ kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")} kwargs_decoder = { argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_") } if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, **kwargs_encoder, ) hidden_states = encoder_outputs[0] # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, inputs_embeds=decoder_inputs_embeds, attention_mask=decoder_attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, labels=labels, **kwargs_decoder, ) return decoder_outputs + encoder_outputs def prepare_inputs_for_generation(self, input_ids, past, attention_mask, **kwargs): assert past is not None, "past has to be defined for encoder_outputs" # first step if type(past) is tuple: encoder_outputs, _ = past else: encoder_outputs = (past,) decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids) return { "attention_mask": attention_mask, "decoder_attention_mask": decoder_inputs["attention_mask"], "decoder_input_ids": decoder_inputs["input_ids"], "encoder_outputs": encoder_outputs, } def _reorder_cache(self, past, beam_idx): # as a default encoder-decoder models do not re-order the past. # TODO(PVP): might have to be updated, e.g. if GPT2 is to be used as a decoder return past
17175
53.182965
396
py
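# --- Hedged sketch of using the EncoderDecoderModel defined above ---
# This mirrors the class docstring examples, spelled out end to end: two BERT
# checkpoints are wired into an encoder-decoder, then a forward pass with labels
# and greedy generation are run. The decoder's cross-attention weights are
# randomly initialized, so generated text is meaningless without fine-tuning.
import torch
from transformers import BertTokenizer, EncoderDecoderModel

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = EncoderDecoderModel.from_encoder_decoder_pretrained(
    "bert-base-uncased", "bert-base-uncased"
)

input_ids = tokenizer.encode(
    "Hello, my dog is cute", add_special_tokens=True, return_tensors="pt"
)

# Training-style forward pass: the decoder is fed the same ids as labels.
loss, logits = model(input_ids=input_ids, decoder_input_ids=input_ids, labels=input_ids)[:2]

# Generation, with decoder_start_token_id chosen as in the docstring example.
generated = model.generate(
    input_ids, decoder_start_token_id=model.config.decoder.pad_token_id
)
print(tokenizer.decode(generated[0], skip_special_tokens=True))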
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/src/transformers/modeling_longformer.py
# coding=utf-8 # Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Longformer model. """ import logging import math import warnings import torch import torch.nn as nn from torch.nn import CrossEntropyLoss, MSELoss from torch.nn import functional as F from .configuration_longformer import LongformerConfig from .file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_callable from .modeling_bert import BertIntermediate, BertLayerNorm, BertOutput, BertPooler, BertPreTrainedModel, BertSelfOutput from .modeling_roberta import RobertaEmbeddings, RobertaLMHead from .modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer logger = logging.getLogger(__name__) _TOKENIZER_FOR_DOC = "LongformerTokenizer" LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "allenai/longformer-base-4096", "allenai/longformer-large-4096", "allenai/longformer-large-4096-finetuned-triviaqa", "allenai/longformer-base-4096-extra.pos.embd.only", "allenai/longformer-large-4096-extra.pos.embd.only", # See all Longformer models at https://huggingface.co/models?filter=longformer ] def _get_question_end_index(input_ids, sep_token_id): """ Computes the index of the first occurance of `sep_token_id`. """ sep_token_indices = (input_ids == sep_token_id).nonzero() batch_size = input_ids.shape[0] assert sep_token_indices.shape[1] == 2, "`input_ids` should have two dimensions" assert ( sep_token_indices.shape[0] == 3 * batch_size ), f"There should be exactly three separator tokens: {sep_token_id} in every sample for questions answering. You might also consider to set `global_attention_mask` manually in the forward function to avoid this error." return sep_token_indices.view(batch_size, 3, 2)[:, 0, 1] def _compute_global_attention_mask(input_ids, sep_token_id, before_sep_token=True): """ Computes global attention mask by putting attention on all tokens before `sep_token_id` if `before_sep_token is True` else after `sep_token_id`. 
""" question_end_index = _get_question_end_index(input_ids, sep_token_id) question_end_index = question_end_index.unsqueeze(dim=1) # size: batch_size x 1 # bool attention mask with True in locations of global attention attention_mask = torch.arange(input_ids.shape[1], device=input_ids.device) if before_sep_token is True: attention_mask = (attention_mask.expand_as(input_ids) < question_end_index).to(torch.uint8) else: # last token is separation token and should not be counted and in the middle are two separation tokens attention_mask = (attention_mask.expand_as(input_ids) > (question_end_index + 1)).to(torch.uint8) * ( attention_mask.expand_as(input_ids) < input_ids.shape[-1] ).to(torch.uint8) return attention_mask class LongformerSelfAttention(nn.Module): def __init__(self, config, layer_id): super().__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads) ) self.num_heads = config.num_attention_heads self.head_dim = int(config.hidden_size / config.num_attention_heads) self.embed_dim = config.hidden_size self.query = nn.Linear(config.hidden_size, self.embed_dim) self.key = nn.Linear(config.hidden_size, self.embed_dim) self.value = nn.Linear(config.hidden_size, self.embed_dim) # separate projection layers for tokens with global attention self.query_global = nn.Linear(config.hidden_size, self.embed_dim) self.key_global = nn.Linear(config.hidden_size, self.embed_dim) self.value_global = nn.Linear(config.hidden_size, self.embed_dim) self.dropout = config.attention_probs_dropout_prob self.layer_id = layer_id attention_window = config.attention_window[self.layer_id] assert ( attention_window % 2 == 0 ), f"`attention_window` for layer {self.layer_id} has to be an even value. Given {attention_window}" assert ( attention_window > 0 ), f"`attention_window` for layer {self.layer_id} has to be positive. Given {attention_window}" self.one_sided_attn_window_size = attention_window // 2 @staticmethod def _pad_and_transpose_last_two_dims(hidden_states_padded, padding): """pads rows and then flips rows and columns""" hidden_states_padded = F.pad( hidden_states_padded, padding ) # padding value is not important because it will be overwritten hidden_states_padded = hidden_states_padded.view( *hidden_states_padded.size()[:-2], hidden_states_padded.size(-1), hidden_states_padded.size(-2) ) return hidden_states_padded @staticmethod def _pad_by_window_overlap_except_last_row(chunked_hidden_states): """shift every row 1 step right, converting columns into diagonals""" total_num_heads, num_chunks, window_overlap, hidden_dim = chunked_hidden_states.size() chunked_hidden_states = F.pad( chunked_hidden_states, (0, window_overlap + 1) ) # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1). 
Padding value is not important because it'll be overwritten chunked_hidden_states = chunked_hidden_states.view( total_num_heads, num_chunks, -1 ) # total_num_heads x num_chunks x window_overlapL+window_overlapwindow_overlap+window_overlap chunked_hidden_states = chunked_hidden_states[ :, :, :-window_overlap ] # total_num_heads x num_chunks x window_overlapL+window_overlapwindow_overlap chunked_hidden_states = chunked_hidden_states.view( total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim ) # total_num_heads x num_chunks, window_overlap x hidden_dim+window_overlap chunked_hidden_states = chunked_hidden_states[:, :, :, :-1] return chunked_hidden_states @staticmethod def _chunk(hidden_states, window_overlap): """convert into overlapping chunkings. Chunk size = 2w, overlap size = w""" # non-overlapping chunks of size = 2w hidden_states = hidden_states.view( hidden_states.size(0), hidden_states.size(1) // (window_overlap * 2), window_overlap * 2, hidden_states.size(2), ) # use `as_strided` to make the chunks overlap with an overlap size = window_overlap chunk_size = list(hidden_states.size()) chunk_size[1] = chunk_size[1] * 2 - 1 chunk_stride = list(hidden_states.stride()) chunk_stride[1] = chunk_stride[1] // 2 return hidden_states.as_strided(size=chunk_size, stride=chunk_stride) def _mask_invalid_locations(self, input_tensor, affected_seq_len) -> torch.Tensor: beginning_mask_2d = input_tensor.new_ones(affected_seq_len, affected_seq_len + 1).tril().flip(dims=[0]) beginning_mask = beginning_mask_2d[None, :, None, :] ending_mask = beginning_mask.flip(dims=(1, 3)) beginning_input = input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1] beginning_mask = beginning_mask.expand(beginning_input.size()) beginning_input.masked_fill_(beginning_mask == 1, -float("inf")) # `== 1` converts to bool or uint8 ending_input = input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1) :] ending_mask = ending_mask.expand(ending_input.size()) ending_input.masked_fill_(ending_mask == 1, -float("inf")) # `== 1` converts to bool or uint8 def _sliding_chunks_query_key_matmul(self, query: torch.Tensor, key: torch.Tensor, window_overlap: int): """Matrix multiplication of query and key tensors using with a sliding window attention pattern. This implementation splits the input into overlapping chunks of size 2w (e.g. 512 for pretrained Longformer) with an overlap of size window_overlap""" batch_size, seq_len, num_heads, head_dim = query.size() assert ( seq_len % (window_overlap * 2) == 0 ), f"Sequence length should be multiple of {window_overlap * 2}. 
Given {seq_len}" assert query.size() == key.size() chunks_count = seq_len // window_overlap - 1 # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size window_overlap * 2 query = query.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) key = key.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) chunked_query = self._chunk(query, window_overlap) chunked_key = self._chunk(key, window_overlap) # matrix multipication # bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim # bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim # bcxy: batch_size * num_heads x chunks x 2window_overlap x window_overlap chunked_attention_scores = torch.einsum("bcxd,bcyd->bcxy", (chunked_query, chunked_key)) # multiply # convert diagonals into columns diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims( chunked_attention_scores, padding=(0, 0, 0, 1) ) # allocate space for the overall attention matrix where the chunks are combined. The last dimension # has (window_overlap * 2 + 1) columns. The first (window_overlap) columns are the window_overlap lower triangles (attention from a word to # window_overlap previous words). The following column is attention score from each word to itself, then # followed by window_overlap columns for the upper triangle. diagonal_attention_scores = diagonal_chunked_attention_scores.new_empty( (batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap * 2 + 1) ) # copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions # - copying the main diagonal and the upper triangle diagonal_attention_scores[:, :-1, :, window_overlap:] = diagonal_chunked_attention_scores[ :, :, :window_overlap, : window_overlap + 1 ] diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[ :, -1, window_overlap:, : window_overlap + 1 ] # - copying the lower triangle diagonal_attention_scores[:, 1:, :, :window_overlap] = diagonal_chunked_attention_scores[ :, :, -(window_overlap + 1) : -1, window_overlap + 1 : ] diagonal_attention_scores[:, 0, 1:window_overlap, 1:window_overlap] = diagonal_chunked_attention_scores[ :, 0, : window_overlap - 1, 1 - window_overlap : ] # separate batch_size and num_heads dimensions again diagonal_attention_scores = diagonal_attention_scores.view( batch_size, num_heads, seq_len, 2 * window_overlap + 1 ).transpose(2, 1) self._mask_invalid_locations(diagonal_attention_scores, window_overlap) return diagonal_attention_scores def _sliding_chunks_matmul_attn_probs_value( self, attn_probs: torch.Tensor, value: torch.Tensor, window_overlap: int ): """Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors. 
Returned tensor will be of the same shape as `attn_probs`""" batch_size, seq_len, num_heads, head_dim = value.size() assert seq_len % (window_overlap * 2) == 0 assert attn_probs.size()[:3] == value.size()[:3] assert attn_probs.size(3) == 2 * window_overlap + 1 chunks_count = seq_len // window_overlap - 1 # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap chunked_attn_probs = attn_probs.transpose(1, 2).reshape( batch_size * num_heads, seq_len // window_overlap, window_overlap, 2 * window_overlap + 1 ) # group batch_size and num_heads dimensions into one value = value.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) # pad seq_len with w at the beginning of the sequence and another window overlap at the end padded_value = F.pad(value, (0, 0, window_overlap, window_overlap), value=-1) # chunk padded_value into chunks of size 3 window overlap and an overlap of size window overlap chunked_value_size = (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim) chunked_value_stride = padded_value.stride() chunked_value_stride = ( chunked_value_stride[0], window_overlap * chunked_value_stride[1], chunked_value_stride[1], chunked_value_stride[2], ) chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride) chunked_attn_probs = self._pad_by_window_overlap_except_last_row(chunked_attn_probs) context = torch.einsum("bcwd,bcdh->bcwh", (chunked_attn_probs, chunked_value)) return context.view(batch_size, num_heads, seq_len, head_dim).transpose(1, 2) def forward( self, hidden_states, attention_mask=None, output_attentions=False, ): """ LongformerSelfAttention expects `len(hidden_states)` to be multiple of `attention_window`. Padding to `attention_window` happens in LongformerModel.forward to avoid redoing the padding on each layer. 
The `attention_mask` is changed in `BertModel.forward` from 0, 1, 2 to -ve: no attention 0: local attention +ve: global attention """ attention_mask = attention_mask.squeeze(dim=2).squeeze(dim=1) # is index masked or global attention is_index_masked = attention_mask < 0 is_index_global_attn = attention_mask > 0 is_global_attn = any(is_index_global_attn.flatten()) hidden_states = hidden_states.transpose(0, 1) # project hidden states query_vectors = self.query(hidden_states) key_vectors = self.key(hidden_states) value_vectors = self.value(hidden_states) seq_len, batch_size, embed_dim = hidden_states.size() assert ( embed_dim == self.embed_dim ), f"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}" # normalize query query_vectors /= math.sqrt(self.head_dim) query_vectors = query_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1) key_vectors = key_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1) # attn_probs = (batch_size, seq_len, num_heads, window*2+1) attn_scores = self._sliding_chunks_query_key_matmul( query_vectors, key_vectors, self.one_sided_attn_window_size ) # values to pad for attention probs remove_from_windowed_attention_mask = (attention_mask != 0).unsqueeze(dim=-1).unsqueeze(dim=-1) # cast to fp32/fp16 then replace 1's with -inf float_mask = remove_from_windowed_attention_mask.type_as(query_vectors).masked_fill( remove_from_windowed_attention_mask, -10000.0 ) # diagonal mask with zeros everywhere and -inf inplace of padding diagonal_mask = self._sliding_chunks_query_key_matmul( float_mask.new_ones(size=float_mask.size()), float_mask, self.one_sided_attn_window_size ) # pad local attention probs attn_scores += diagonal_mask assert list(attn_scores.size()) == [ batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1, ], f"attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads}, {self.one_sided_attn_window_size * 2 + 1}), but is of size {attn_scores.size()}" # compute local attention probs from global attention keys and contact over window dim if is_global_attn: # compute global attn indices required through out forward fn ( max_num_global_attn_indices, is_index_global_attn_nonzero, is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero, ) = self._get_global_attn_indices(is_index_global_attn) # calculate global attn probs from global key global_key_attn_scores = self._concat_with_global_key_attn_probs( query_vectors=query_vectors, key_vectors=key_vectors, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, ) # concat to attn_probs # (batch_size, seq_len, num_heads, extra attention count + 2*window+1) attn_scores = torch.cat((global_key_attn_scores, attn_scores), dim=-1) # free memory del global_key_attn_scores attn_probs_fp32 = F.softmax(attn_scores, dim=-1, dtype=torch.float32) # use fp32 for numerical stability attn_probs = attn_probs_fp32.type_as(attn_scores) # free memory del attn_probs_fp32 # softmax sometimes inserts NaN if all positions are masked, replace them with 0 attn_probs = torch.masked_fill(attn_probs, is_index_masked.unsqueeze(-1).unsqueeze(-1), 0.0) # apply dropout attn_probs = F.dropout(attn_probs, p=self.dropout, training=self.training) value_vectors = value_vectors.view(seq_len, batch_size, self.num_heads, 
self.head_dim).transpose(0, 1) # compute local attention output with global attention value and add if is_global_attn: # compute sum of global and local attn attn_output = self._compute_attn_output_with_global_indices( value_vectors=value_vectors, attn_probs=attn_probs, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, ) else: # compute local attn only attn_output = self._sliding_chunks_matmul_attn_probs_value( attn_probs, value_vectors, self.one_sided_attn_window_size ) assert attn_output.size() == (batch_size, seq_len, self.num_heads, self.head_dim), "Unexpected size" attn_output = attn_output.transpose(0, 1).reshape(seq_len, batch_size, embed_dim).contiguous() # compute value for global attention and overwrite to attention output # TODO: remove the redundant computation if is_global_attn: global_attn_output = self._compute_global_attn_output_from_hidden( hidden_states=hidden_states, max_num_global_attn_indices=max_num_global_attn_indices, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, is_index_masked=is_index_masked, ) # get only non zero global attn output nonzero_global_attn_output = global_attn_output[ is_local_index_global_attn_nonzero[0], :, is_local_index_global_attn_nonzero[1] ] # overwrite values with global attention attn_output[is_index_global_attn_nonzero[::-1]] = nonzero_global_attn_output.view( len(is_local_index_global_attn_nonzero[0]), -1 ) attn_output = attn_output.transpose(0, 1) if output_attentions: if is_global_attn: # With global attention, return global attention probabilities only # batch_size x num_heads x max_num_global_attention_tokens x sequence_length # which is the attention weights from tokens with global attention to all tokens # It does not return local attention # In case of variable number of global attention in the rows of a batch, # attn_probs are padded with -10000.0 attention scores attn_probs = attn_probs.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len) else: # without global attention, return local attention probabilities # batch_size x num_heads x sequence_length x window_size # which is the attention weights of every token attending to its neighbours attn_probs = attn_probs.permute(0, 2, 1, 3) outputs = (attn_output, attn_probs) if output_attentions else (attn_output,) return outputs @staticmethod def _get_global_attn_indices(is_index_global_attn): """ compute global attn indices required throughout forward pass """ # helper variable num_global_attn_indices = is_index_global_attn.long().sum(dim=1) # max number of global attn indices in batch max_num_global_attn_indices = num_global_attn_indices.max() # indices of global attn is_index_global_attn_nonzero = is_index_global_attn.nonzero(as_tuple=True) # helper variable is_local_index_global_attn = torch.arange( max_num_global_attn_indices, device=is_index_global_attn.device ) < num_global_attn_indices.unsqueeze(dim=-1) # location of the non-padding values within global attention indices is_local_index_global_attn_nonzero = is_local_index_global_attn.nonzero(as_tuple=True) # location of the padding values within global attention indices is_local_index_no_global_attn_nonzero = (is_local_index_global_attn == 0).nonzero(as_tuple=True) return ( max_num_global_attn_indices, 
is_index_global_attn_nonzero, is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero, ) def _concat_with_global_key_attn_probs( self, key_vectors, query_vectors, max_num_global_attn_indices, is_index_global_attn_nonzero, is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero, ): batch_size = key_vectors.shape[0] # create only global key vectors key_vectors_only_global = key_vectors.new_zeros( batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim ) key_vectors_only_global[is_local_index_global_attn_nonzero] = key_vectors[is_index_global_attn_nonzero] # (batch_size, seq_len, num_heads, max_num_global_attn_indices) attn_probs_from_global_key = torch.einsum("blhd,bshd->blhs", (query_vectors, key_vectors_only_global)) attn_probs_from_global_key[ is_local_index_no_global_attn_nonzero[0], :, :, is_local_index_no_global_attn_nonzero[1] ] = -10000.0 return attn_probs_from_global_key def _compute_attn_output_with_global_indices( self, value_vectors, attn_probs, max_num_global_attn_indices, is_index_global_attn_nonzero, is_local_index_global_attn_nonzero, ): batch_size = attn_probs.shape[0] # cut local attn probs to global only attn_probs_only_global = attn_probs.narrow(-1, 0, max_num_global_attn_indices) # get value vectors for global only value_vectors_only_global = value_vectors.new_zeros( batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim ) value_vectors_only_global[is_local_index_global_attn_nonzero] = value_vectors[is_index_global_attn_nonzero] # use `matmul` because `einsum` crashes sometimes with fp16 # attn = torch.einsum('blhs,bshd->blhd', (selected_attn_probs, selected_v)) # compute attn output only global attn_output_only_global = torch.matmul( attn_probs_only_global.transpose(1, 2), value_vectors_only_global.transpose(1, 2) ).transpose(1, 2) # reshape attn probs attn_probs_without_global = attn_probs.narrow( -1, max_num_global_attn_indices, attn_probs.size(-1) - max_num_global_attn_indices ).contiguous() # compute attn output with global attn_output_without_global = self._sliding_chunks_matmul_attn_probs_value( attn_probs_without_global, value_vectors, self.one_sided_attn_window_size ) return attn_output_only_global + attn_output_without_global def _compute_global_attn_output_from_hidden( self, hidden_states, max_num_global_attn_indices, is_local_index_global_attn_nonzero, is_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero, is_index_masked, ): seq_len, batch_size = hidden_states.shape[:2] # prepare global hidden states global_attn_hidden_states = hidden_states.new_zeros(max_num_global_attn_indices, batch_size, self.embed_dim) global_attn_hidden_states[is_local_index_global_attn_nonzero[::-1]] = hidden_states[ is_index_global_attn_nonzero[::-1] ] # global key, query, value global_query_vectors_only_global = self.query_global(global_attn_hidden_states) global_key_vectors = self.key_global(hidden_states) global_value_vectors = self.value_global(hidden_states) # normalize global_query_vectors_only_global /= math.sqrt(self.head_dim) # reshape global_query_vectors_only_global = ( global_query_vectors_only_global.contiguous() .view(max_num_global_attn_indices, batch_size * self.num_heads, self.head_dim) .transpose(0, 1) ) # (batch_size * self.num_heads, max_num_global_attn_indices, head_dim) global_key_vectors = ( global_key_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1) ) # batch_size * self.num_heads, seq_len, head_dim) global_value_vectors = ( 
global_value_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1) ) # batch_size * self.num_heads, seq_len, head_dim) # compute attn scores global_attn_scores = torch.bmm(global_query_vectors_only_global, global_key_vectors.transpose(1, 2)) assert list(global_attn_scores.size()) == [ batch_size * self.num_heads, max_num_global_attn_indices, seq_len, ], f"global_attn_scores have the wrong size. Size should be {(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)}, but is {global_attn_scores.size()}." global_attn_scores = global_attn_scores.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len) global_attn_scores[ is_local_index_no_global_attn_nonzero[0], :, is_local_index_no_global_attn_nonzero[1], : ] = -10000.0 global_attn_scores = global_attn_scores.masked_fill(is_index_masked.unsqueeze(1).unsqueeze(2), -10000.0,) global_attn_scores = global_attn_scores.view(batch_size * self.num_heads, max_num_global_attn_indices, seq_len) # compute global attn probs global_attn_probs_float = F.softmax( global_attn_scores, dim=-1, dtype=torch.float32 ) # use fp32 for numerical stability global_attn_probs = F.dropout( global_attn_probs_float.type_as(global_attn_scores), p=self.dropout, training=self.training ) # global attn output global_attn_output = torch.bmm(global_attn_probs, global_value_vectors) assert list(global_attn_output.size()) == [ batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim, ], f"global_attn_output tensor has the wrong size. Size should be {(batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim)}, but is {global_attn_output.size()}." global_attn_output = global_attn_output.view( batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim ) return global_attn_output class LongformerAttention(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.self = LongformerSelfAttention(config, layer_id) self.output = BertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, output_attentions=False, ): self_outputs = self.self(hidden_states, attention_mask, output_attentions,) attn_output = self.output(self_outputs[0], hidden_states) outputs = (attn_output,) + self_outputs[1:] # add attentions if we output them return outputs class LongformerLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.attention = LongformerAttention(config, layer_id) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward( self, hidden_states, attention_mask=None, output_attentions=False, ): self_attn_outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions,) attn_output = self_attn_outputs[0] outputs = self_attn_outputs[1:] # 
add self attentions if we output attention weights intermediate_output = self.intermediate(attn_output) layer_output = self.output(intermediate_output, attn_output) outputs = (layer_output,) + outputs return outputs class LongformerEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([LongformerLayer(config, layer_id=i) for i in range(config.num_hidden_layers)]) def forward( self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, ): all_hidden_states = () all_attentions = () for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if getattr(self.config, "gradient_checkpointing", False): def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, ) else: layer_outputs = layer_module(hidden_states, attention_mask, output_attentions,) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states,) if output_hidden_states: outputs = outputs + (all_hidden_states,) if output_attentions: outputs = outputs + (all_attentions,) return outputs # last-layer hidden state, (all hidden states), (all attentions) class LongformerPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = LongformerConfig base_model_prefix = "longformer" def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, BertLayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() LONGFORMER_START_DOCSTRING = r""" This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.LongformerConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ LONGFORMER_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`transformers.LongformerTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`): Mask to avoid performing attention on padding token indices. 
Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. `What are attention masks? <../glossary.html#attention-mask>`__ global_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`): Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attend to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the `Longformer paper <https://arxiv.org/abs/2004.05150>`__ for more details. Mask values selected in ``[0, 1]``: ``0`` for local attention (a sliding window attention), ``1`` for global attention (tokens that attend to all other tokens, and all other tokens attend to them). token_type_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`): If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail. """ @add_start_docstrings( "The bare Longformer Model outputting raw hidden-states without any specific head on top.", LONGFORMER_START_DOCSTRING, ) class LongformerModel(LongformerPreTrainedModel): """ This class overrides :class:`~transformers.RobertaModel` to provide the ability to process long sequences following the selfattention approach described in `Longformer: the Long-Document Transformer <https://arxiv.org/abs/2004.05150>`__ by Iz Beltagy, Matthew E. Peters, and Arman Cohan. Longformer selfattention combines a local (sliding window) and global attention to extend to long documents without the O(n^2) increase in memory and compute. The selfattention module `LongformerSelfAttention` implemented here supports the combination of local and global attention but it lacks support for autoregressive attention and dilated attention. Autoregressive and dilated attention are more relevant for autoregressive language modeling than finetuning on downstream tasks. A future release will add support for autoregressive attention, but the support for dilated attention requires a custom CUDA kernel to be memory and compute efficient. 
""" config_class = LongformerConfig base_model_prefix = "longformer" def __init__(self, config): super().__init__(config) self.config = config if isinstance(config.attention_window, int): assert config.attention_window % 2 == 0, "`config.attention_window` has to be an even value" assert config.attention_window > 0, "`config.attention_window` has to be positive" config.attention_window = [config.attention_window] * config.num_hidden_layers # one value per layer else: assert len(config.attention_window) == config.num_hidden_layers, ( "`len(config.attention_window)` should equal `config.num_hidden_layers`. " f"Expected {config.num_hidden_layers}, given {len(config.attention_window)}" ) self.embeddings = RobertaEmbeddings(config) self.encoder = LongformerEncoder(config) self.pooler = BertPooler(config) self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def _pad_to_window_size( self, input_ids: torch.Tensor, attention_mask: torch.Tensor, token_type_ids: torch.Tensor, position_ids: torch.Tensor, inputs_embeds: torch.Tensor, pad_token_id: int, ): """A helper function to pad tokens and mask to work with implementation of Longformer selfattention.""" # padding attention_window = ( self.config.attention_window if isinstance(self.config.attention_window, int) else max(self.config.attention_window) ) assert attention_window % 2 == 0, f"`attention_window` should be an even value. 
Given {attention_window}" input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape batch_size, seq_len = input_shape[:2] padding_len = (attention_window - seq_len % attention_window) % attention_window if padding_len > 0: logger.info( "Input ids are automatically padded from {} to {} to be a multiple of `config.attention_window`: {}".format( seq_len, seq_len + padding_len, attention_window ) ) if input_ids is not None: input_ids = F.pad(input_ids, (0, padding_len), value=pad_token_id) if position_ids is not None: # pad with position_id = pad_token_id as in modeling_roberta.RobertaEmbeddings position_ids = F.pad(position_ids, (0, padding_len), value=pad_token_id) if inputs_embeds is not None: input_ids_padding = inputs_embeds.new_full( (batch_size, padding_len), self.config.pad_token_id, dtype=torch.long, ) inputs_embeds_padding = self.embeddings(input_ids_padding) inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2) attention_mask = F.pad(attention_mask, (0, padding_len), value=False) # no attention on the padding tokens token_type_ids = F.pad(token_type_ids, (0, padding_len), value=0) # pad with token_type_id = 0 return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds def _merge_to_attention_mask(self, attention_mask: torch.Tensor, global_attention_mask: torch.Tensor): # longformer self attention expects attention mask to have 0 (no attn), 1 (local attn), 2 (global attn) # (global_attention_mask + 1) => 1 for local attention, 2 for global attention # => final attention_mask => 0 for no attention, 1 for local attention 2 for global attention if attention_mask is not None: attention_mask = attention_mask * (global_attention_mask + 1) else: # simply use `global_attention_mask` as `attention_mask` # if no `attention_mask` is given attention_mask = global_attention_mask + 1 return attention_mask @add_start_docstrings_to_callable(LONGFORMER_INPUTS_DOCSTRING.format("(batch_size, sequence_length)")) def forward( self, input_ids=None, attention_mask=None, global_attention_mask=None, token_type_ids=None, position_ids=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, ): r""" Returns: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs: prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`) Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: >>> import torch >>> from transformers import LongformerModel, LongformerTokenizer >>> model = LongformerModel.from_pretrained('allenai/longformer-base-4096') >>> tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096') >>> SAMPLE_TEXT = ' '.join(['Hello world! '] * 1000) # long input document >>> input_ids = torch.tensor(tokenizer.encode(SAMPLE_TEXT)).unsqueeze(0) # batch of size 1 >>> # Attention mask values -- 0: no attention, 1: local attention, 2: global attention >>> attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=input_ids.device) # initialize to local attention >>> attention_mask[:, [1, 4, 21,]] = 2 # Set global attention based on the task. For example, ... # classification: the <s> token ... # QA: question tokens ... # LM: potentially on the beginning of sentences and paragraphs >>> sequence_output, pooled_output = model(input_ids, attention_mask=attention_mask) """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # merge `global_attention_mask` and `attention_mask` if global_attention_mask is not None: attention_mask = self._merge_to_attention_mask(attention_mask, global_attention_mask) padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds = self._pad_to_window_size( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, pad_token_id=self.config.pad_token_id, ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
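        # Note on the call below: after `_merge_to_attention_mask` the mask holds 0 (no attention), 1 (local attention) and 2 (global attention);
        # `get_extended_attention_mask` turns it into an additive mask, roughly (1 - mask) * -10000.0, so padding becomes a large negative value,
        # local attention maps to 0 and global attention becomes positive, the -ve / 0 / +ve convention that `LongformerSelfAttention.forward` checks via `attention_mask < 0` and `attention_mask > 0`.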
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) outputs = (sequence_output, pooled_output,) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here # undo padding if padding_len > 0: # `output` has the following tensors: sequence_output, pooled_output, (hidden_states), (attentions) # `sequence_output`: unpad because the calling function is expecting a length == input_ids.size(1) # `pooled_output`: independent of the sequence length # `hidden_states`: mainly used for debugging and analysis, so keep the padding # `attentions`: mainly used for debugging and analysis, so keep the padding outputs = outputs[0][:, :-padding_len], *outputs[1:] return outputs @add_start_docstrings("""Longformer Model with a `language modeling` head on top. """, LONGFORMER_START_DOCSTRING) class LongformerForMaskedLM(LongformerPreTrainedModel): config_class = LongformerConfig base_model_prefix = "longformer" def __init__(self, config): super().__init__(config) self.longformer = LongformerModel(config) self.lm_head = RobertaLMHead(config) self.init_weights() @add_start_docstrings_to_callable(LONGFORMER_INPUTS_DOCSTRING.format("(batch_size, sequence_length)")) def forward( self, input_ids=None, attention_mask=None, global_attention_mask=None, token_type_ids=None, position_ids=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, **kwargs ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`): Used to hide legacy arguments that have been deprecated. Returns: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs: masked_lm_loss (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Masked language modeling loss. prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`) Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: >>> import torch >>> from transformers import LongformerForMaskedLM, LongformerTokenizer >>> model = LongformerForMaskedLM.from_pretrained('allenai/longformer-base-4096') >>> tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096') >>> SAMPLE_TEXT = ' '.join(['Hello world! '] * 1000) # long input document >>> input_ids = torch.tensor(tokenizer.encode(SAMPLE_TEXT)).unsqueeze(0) # batch of size 1 >>> attention_mask = None # default is local attention everywhere, which is a good choice for MaskedLM ... # check ``LongformerModel.forward`` for more details how to set `attention_mask` >>> loss, prediction_scores = model(input_ids, attention_mask=attention_mask, labels=input_ids) """ if "masked_lm_labels" in kwargs: warnings.warn( "The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.", DeprecationWarning, ) labels = kwargs.pop("masked_lm_labels") assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}." outputs = self.longformer( input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) outputs = (masked_lm_loss,) + outputs return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions) @add_start_docstrings( """Longformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, LONGFORMER_START_DOCSTRING, ) class LongformerForSequenceClassification(BertPreTrainedModel): config_class = LongformerConfig base_model_prefix = "longformer" def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.longformer = LongformerModel(config) self.classifier = LongformerClassificationHead(config) self.init_weights() @add_start_docstrings_to_callable(LONGFORMER_INPUTS_DOCSTRING.format("(batch_size, sequence_length)")) @add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="allenai/longformer-base-4096") def forward( self, input_ids=None, attention_mask=None, global_attention_mask=None, token_type_ids=None, position_ids=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
Returns: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.LongformerConfig`) and inputs: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ if global_attention_mask is None: logger.info("Initializing global attention on CLS token...") global_attention_mask = torch.zeros_like(input_ids) # global attention on cls token global_attention_mask[:, 0] = 1 outputs = self.longformer( input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) outputs = (logits,) + outputs[2:] if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) outputs = (loss,) + outputs return outputs # (loss), logits, (hidden_states), (attentions) class LongformerClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, hidden_states, **kwargs): hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS]) hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) output = self.out_proj(hidden_states) return output @add_start_docstrings( """Longformer Model with a span classification head on top for extractive question-answering tasks like SQuAD / TriviaQA (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", LONGFORMER_START_DOCSTRING, ) class LongformerForQuestionAnswering(BertPreTrainedModel): config_class = LongformerConfig base_model_prefix = "longformer" def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.longformer = LongformerModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() @add_start_docstrings_to_callable(LONGFORMER_INPUTS_DOCSTRING.format("(batch_size, sequence_length)")) def forward( self, input_ids=None, attention_mask=None, global_attention_mask=None, token_type_ids=None, position_ids=None, inputs_embeds=None, start_positions=None, end_positions=None, output_attentions=None, output_hidden_states=None, ): r""" start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. Returns: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.LongformerConfig`) and inputs: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`): Span-start scores (before SoftMax). end_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`): Span-end scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: >>> from transformers import LongformerTokenizer, LongformerForQuestionAnswering >>> import torch >>> tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-large-4096-finetuned-triviaqa") >>> model = LongformerForQuestionAnswering.from_pretrained("allenai/longformer-large-4096-finetuned-triviaqa") >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> encoding = tokenizer(question, text, return_tensors="pt") >>> input_ids = encoding["input_ids"] >>> # default is local attention everywhere >>> # the forward method will automatically set global attention on question tokens >>> attention_mask = encoding["attention_mask"] >>> start_scores, end_scores = model(input_ids, attention_mask=attention_mask) >>> all_tokens = tokenizer.convert_ids_to_tokens(input_ids[0].tolist()) >>> answer_tokens = all_tokens[torch.argmax(start_scores) :torch.argmax(end_scores)+1] >>> answer = tokenizer.decode(tokenizer.convert_tokens_to_ids(answer_tokens)) # remove space prepending space token """ # set global attention on question tokens if global_attention_mask is None: logger.info("Initializing global attention on question tokens...") # put global attention on all tokens until `config.sep_token_id` is reached global_attention_mask = _compute_global_attention_mask(input_ids, self.config.sep_token_id) outputs = self.longformer( input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) outputs = (start_logits, end_logits,) + outputs[2:] if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 outputs = (total_loss,) + outputs return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions) @add_start_docstrings( """Longformer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", LONGFORMER_START_DOCSTRING, ) class LongformerForTokenClassification(BertPreTrainedModel): config_class = LongformerConfig base_model_prefix = "longformer" def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.longformer = LongformerModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() @add_start_docstrings_to_callable(LONGFORMER_INPUTS_DOCSTRING.format("(batch_size, sequence_length)")) @add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="allenai/longformer-base-4096") def forward( self, input_ids=None, attention_mask=None, global_attention_mask=None, token_type_ids=None, position_ids=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. Returns: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.LongformerConfig`) and inputs: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) : Classification loss. scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`) Classification scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ outputs = self.longformer( input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) outputs = (loss,) + outputs return outputs # (loss), scores, (hidden_states), (attentions) @add_start_docstrings( """Longformer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", LONGFORMER_START_DOCSTRING, ) class LongformerForMultipleChoice(BertPreTrainedModel): config_class = LongformerConfig base_model_prefix = "longformer" def __init__(self, config): super().__init__(config) self.longformer = LongformerModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.init_weights() @add_start_docstrings_to_callable(LONGFORMER_INPUTS_DOCSTRING.format("(batch_size, num_choices, sequence_length)")) @add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="allenai/longformer-base-4096") def forward( self, input_ids=None, token_type_ids=None, attention_mask=None, global_attention_mask=None, labels=None, position_ids=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`): Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension of the input tensors. (see `input_ids` above) Returns: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs: loss (:obj:`torch.FloatTensor`` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided): Classification loss. classification_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`): `num_choices` is the second dimension of the input tensors. (see `input_ids` above). Classification scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] # set global attention on question tokens if global_attention_mask is None: logger.info("Initializing global attention on multiple choice...") # put global attention on all tokens after `config.sep_token_id` global_attention_mask = torch.stack( [ _compute_global_attention_mask(input_ids[:, i], self.config.sep_token_id, before_sep_token=False) for i in range(num_choices) ], dim=1, ) flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None flat_global_attention_mask = ( global_attention_mask.view(-1, global_attention_mask.size(-1)) if global_attention_mask is not None else None ) flat_inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.longformer( flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, global_attention_mask=flat_global_attention_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) outputs = (loss,) + outputs return outputs # (loss), reshaped_logits, (hidden_states), (attentions)
76,779
47.935628
222
py
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/src/transformers/modeling_tf_albert.py
# coding=utf-8 # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 ALBERT model. """ import logging import tensorflow as tf from .configuration_albert import AlbertConfig from .file_utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_callable, ) from .modeling_tf_bert import ACT2FN, TFBertSelfAttention from .modeling_tf_utils import ( TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFTokenClassificationLoss, cast_bool_to_primitive, get_initializer, keras_serializable, shape_list, ) from .tokenization_utils import BatchEncoding logger = logging.getLogger(__name__) _TOKENIZER_FOR_DOC = "AlbertTokenizer" TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "albert-base-v1", "albert-large-v1", "albert-xlarge-v1", "albert-xxlarge-v1", "albert-base-v2", "albert-large-v2", "albert-xlarge-v2", "albert-xxlarge-v2", # See all ALBERT models at https://huggingface.co/models?filter=albert ] class TFAlbertEmbeddings(tf.keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings. """ def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.vocab_size = config.vocab_size self.position_embeddings = tf.keras.layers.Embedding( config.max_position_embeddings, config.embedding_size, embeddings_initializer=get_initializer(self.config.initializer_range), name="position_embeddings", ) self.token_type_embeddings = tf.keras.layers.Embedding( config.type_vocab_size, config.embedding_size, embeddings_initializer=get_initializer(self.config.initializer_range), name="token_type_embeddings", ) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def build(self, input_shape): """Build shared word embedding layer """ with tf.name_scope("word_embeddings"): # Create and initialize weights. The random normal initializer was chosen # arbitrarily, and works well. self.word_embeddings = self.add_weight( "weight", shape=[self.config.vocab_size, self.config.embedding_size], initializer=get_initializer(self.config.initializer_range), ) super().build(input_shape) def call(self, inputs, mode="embedding", training=False): """Get token embeddings of inputs. Args: inputs: list of three int64 tensors with shape [batch_size, length]: (input_ids, position_ids, token_type_ids) mode: string, a valid value is one of "embedding" and "linear". Returns: outputs: (1) If mode == "embedding", output embedding tensor, float32 with shape [batch_size, length, embedding_size]; (2) mode == "linear", output linear tensor, float32 with shape [batch_size, length, vocab_size]. Raises: ValueError: if mode is not valid. 
Shared weights logic adapted from https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24 """ if mode == "embedding": return self._embedding(inputs, training=training) elif mode == "linear": return self._linear(inputs) else: raise ValueError("mode {} is not valid.".format(mode)) def _embedding(self, inputs, training=False): """Applies embedding based on inputs tensor.""" input_ids, position_ids, token_type_ids, inputs_embeds = inputs if input_ids is not None: input_shape = shape_list(input_ids) else: input_shape = shape_list(inputs_embeds)[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = tf.range(seq_length, dtype=tf.int32)[tf.newaxis, :] if token_type_ids is None: token_type_ids = tf.fill(input_shape, 0) if inputs_embeds is None: inputs_embeds = tf.gather(self.word_embeddings, input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings, training=training) return embeddings def _linear(self, inputs): """Computes logits by running inputs through a linear layer. Args: inputs: A float32 tensor with shape [batch_size, length, embedding_size] Returns: float32 tensor with shape [batch_size, length, vocab_size]. """ batch_size = shape_list(inputs)[0] length = shape_list(inputs)[1] x = tf.reshape(inputs, [-1, self.config.embedding_size]) logits = tf.matmul(x, self.word_embeddings, transpose_b=True) return tf.reshape(logits, [batch_size, length, self.config.vocab_size]) class TFAlbertSelfAttention(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads) ) self.num_attention_heads = config.num_attention_heads assert config.hidden_size % config.num_attention_heads == 0 self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = tf.keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = tf.keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" ) self.value = tf.keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" ) self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x, batch_size): x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size)) return tf.transpose(x, perm=[0, 2, 1, 3]) def call(self, inputs, training=False): hidden_states, attention_mask, head_mask, output_attentions = inputs batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores. 
# (batch size, num_heads, seq_len_q, seq_len_k) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) # scale attention_scores dk = tf.cast(shape_list(key_layer)[-1], tf.float32) attention_scores = attention_scores / tf.math.sqrt(dk) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TFAlbertModel call() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = tf.nn.softmax(attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) context_layer = tf.reshape( context_layer, (batch_size, -1, self.all_head_size) ) # (batch_size, seq_len_q, all_head_size) outputs = ( (context_layer, attention_probs) if cast_bool_to_primitive(output_attentions) is True else (context_layer,) ) return outputs class TFAlbertSelfOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def call(self, inputs, training=False): hidden_states, input_tensor = inputs hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class TFAlbertAttention(TFBertSelfAttention): def __init__(self, config, **kwargs): super().__init__(config, **kwargs) self.hidden_size = config.hidden_size self.dense = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.pruned_heads = set() def prune_heads(self, heads): raise NotImplementedError def call(self, inputs, training=False): input_tensor, attention_mask, head_mask, output_attentions = inputs batch_size = shape_list(input_tensor)[0] mixed_query_layer = self.query(input_tensor) mixed_key_layer = self.key(input_tensor) mixed_value_layer = self.value(input_tensor) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores. # (batch size, num_heads, seq_len_q, seq_len_k) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) # scale attention_scores dk = tf.cast(shape_list(key_layer)[-1], tf.float32) attention_scores = attention_scores / tf.math.sqrt(dk) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TFBertModel call() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
attention_probs = tf.nn.softmax(attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) context_layer = tf.reshape( context_layer, (batch_size, -1, self.all_head_size) ) # (batch_size, seq_len_q, all_head_size) self_outputs = ( (context_layer, attention_probs) if cast_bool_to_primitive(output_attentions) is True else (context_layer,) ) hidden_states = self_outputs[0] hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) attention_output = self.LayerNorm(hidden_states + input_tensor) # add attentions if we output them outputs = (attention_output,) + self_outputs[1:] return outputs class TFAlbertLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.attention = TFAlbertAttention(config, name="attention") self.ffn = tf.keras.layers.Dense( config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="ffn" ) if isinstance(config.hidden_act, str): self.activation = ACT2FN[config.hidden_act] else: self.activation = config.hidden_act self.ffn_output = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="ffn_output" ) self.full_layer_layer_norm = tf.keras.layers.LayerNormalization( epsilon=config.layer_norm_eps, name="full_layer_layer_norm" ) self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def call(self, inputs, training=False): hidden_states, attention_mask, head_mask, output_attentions = inputs attention_outputs = self.attention( [hidden_states, attention_mask, head_mask, output_attentions], training=training ) ffn_output = self.ffn(attention_outputs[0]) ffn_output = self.activation(ffn_output) ffn_output = self.ffn_output(ffn_output) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.full_layer_layer_norm(ffn_output + attention_outputs[0]) # add attentions if we output them outputs = (hidden_states,) + attention_outputs[1:] return outputs class TFAlbertLayerGroup(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.albert_layers = [ TFAlbertLayer(config, name="albert_layers_._{}".format(i)) for i in range(config.inner_group_num) ] def call(self, inputs, training=False): hidden_states, attention_mask, head_mask, output_attentions, output_hidden_states = inputs layer_hidden_states = () layer_attentions = () for layer_index, albert_layer in enumerate(self.albert_layers): layer_output = albert_layer( [hidden_states, attention_mask, head_mask[layer_index], output_attentions], training=training ) hidden_states = layer_output[0] if cast_bool_to_primitive(output_attentions) is True: layer_attentions = layer_attentions + (layer_output[1],) if cast_bool_to_primitive(output_hidden_states) is True: layer_hidden_states = layer_hidden_states + (hidden_states,) outputs = (hidden_states,) if cast_bool_to_primitive(output_hidden_states) is True: outputs = outputs + (layer_hidden_states,) if cast_bool_to_primitive(output_attentions) is True: outputs = outputs + (layer_attentions,) # last-layer hidden state, (layer hidden states), (layer 
attentions) return outputs class TFAlbertTransformer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.embedding_hidden_mapping_in = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="embedding_hidden_mapping_in", ) self.albert_layer_groups = [ TFAlbertLayerGroup(config, name="albert_layer_groups_._{}".format(i)) for i in range(config.num_hidden_groups) ] def call(self, inputs, training=False): hidden_states, attention_mask, head_mask, output_attentions, output_hidden_states = inputs hidden_states = self.embedding_hidden_mapping_in(hidden_states) all_attentions = () if cast_bool_to_primitive(output_hidden_states) is True: all_hidden_states = (hidden_states,) for i in range(self.config.num_hidden_layers): # Number of layers in a hidden group layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups) # Index of the hidden group group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups)) layer_group_output = self.albert_layer_groups[group_idx]( [ hidden_states, attention_mask, head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group], output_attentions, output_hidden_states, ], training=training, ) hidden_states = layer_group_output[0] if cast_bool_to_primitive(output_attentions) is True: all_attentions = all_attentions + layer_group_output[-1] if cast_bool_to_primitive(output_hidden_states) is True: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states,) if cast_bool_to_primitive(output_hidden_states) is True: outputs = outputs + (all_hidden_states,) if cast_bool_to_primitive(output_attentions) is True: outputs = outputs + (all_attentions,) # last-layer hidden state, (all hidden states), (all attentions) return outputs class TFAlbertPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = AlbertConfig base_model_prefix = "albert" class TFAlbertMLMHead(tf.keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.vocab_size = config.vocab_size self.dense = tf.keras.layers.Dense( config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.activation = ACT2FN[config.hidden_act] else: self.activation = config.hidden_act self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
self.decoder = input_embeddings def build(self, input_shape): self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias") self.decoder_bias = self.add_weight( shape=(self.vocab_size,), initializer="zeros", trainable=True, name="decoder/bias" ) super().build(input_shape) def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.LayerNorm(hidden_states) hidden_states = self.decoder(hidden_states, mode="linear") + self.decoder_bias return hidden_states @keras_serializable class TFAlbertMainLayer(tf.keras.layers.Layer): config_class = AlbertConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.num_hidden_layers = config.num_hidden_layers self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.embeddings = TFAlbertEmbeddings(config, name="embeddings") self.encoder = TFAlbertTransformer(config, name="encoder") self.pooler = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="pooler", ) def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value self.embeddings.vocab_size = value.shape[0] def _resize_token_embeddings(self, new_num_tokens): raise NotImplementedError def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError def call( self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, training=False, ): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids position_ids = inputs[3] if len(inputs) > 3 else position_ids head_mask = inputs[4] if len(inputs) > 4 else head_mask inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds output_attentions = inputs[6] if len(inputs) > 6 else output_attentions output_hidden_states = inputs[7] if len(inputs) > 7 else output_hidden_states assert len(inputs) <= 8, "Too many inputs." elif isinstance(inputs, (dict, BatchEncoding)): input_ids = inputs.get("input_ids") attention_mask = inputs.get("attention_mask", attention_mask) token_type_ids = inputs.get("token_type_ids", token_type_ids) position_ids = inputs.get("position_ids", position_ids) head_mask = inputs.get("head_mask", head_mask) inputs_embeds = inputs.get("inputs_embeds", inputs_embeds) output_attentions = inputs.get("output_attentions", output_attentions) output_hidden_states = inputs.get("output_hidden_states", output_hidden_states) assert len(inputs) <= 8, "Too many inputs." 
else: input_ids = inputs output_attentions = output_attentions if output_attentions is not None else self.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.output_hidden_states if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: attention_mask = tf.fill(input_shape, 1) if token_type_ids is None: token_type_ids = tf.fill(input_shape, 0) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :] # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = tf.cast(extended_attention_mask, tf.float32) extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.num_hidden_layers # head_mask = tf.constant([0] * self.num_hidden_layers) embedding_output = self.embeddings([input_ids, position_ids, token_type_ids, inputs_embeds], training=training) encoder_outputs = self.encoder( [embedding_output, extended_attention_mask, head_mask, output_attentions, output_hidden_states], training=training, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output[:, 0]) # add hidden_states and attentions if they are here outputs = (sequence_output, pooled_output,) + encoder_outputs[1:] # sequence_output, pooled_output, (hidden_states), (attentions) return outputs ALBERT_START_DOCSTRING = r""" This model is a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ sub-class. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. .. _`ALBERT: A Lite BERT for Self-supervised Learning of Language Representations`: https://arxiv.org/abs/1909.11942 .. _`tf.keras.Model`: https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model .. note:: TF 2.0 models accepts two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional arguments. This second option is useful when using :obj:`tf.keras.Model.fit()` method which currently requires having all the tensors in the first argument of the model call function: :obj:`model(inputs)`. 
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument : - a single Tensor with input_ids only and nothing else: :obj:`model(inputs_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: :obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})` Args: config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ ALBERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`{0}`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`transformers.AlbertTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`{0}`, `optional, defaults to :obj:`None`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. inputs_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. training (:obj:`boolean`, `optional`, defaults to :obj:`False`): Whether to activate dropout modules (if set to :obj:`True`) during training or to de-activate them (if set to :obj:`False`) for evaluation. output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`): If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail. 
""" @add_start_docstrings( "The bare Albert Model transformer outputing raw hidden-states without any specific head on top.", ALBERT_START_DOCSTRING, ) class TFAlbertModel(TFAlbertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.albert = TFAlbertMainLayer(config, name="albert") @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)")) @add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="albert-base-v2") def call(self, inputs, **kwargs): r""" Returns: :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs: last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (:obj:`tf.Tensor` of shape :obj:`(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during Albert pretraining. This output is usually *not* a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence. hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ outputs = self.albert(inputs, **kwargs) return outputs @add_start_docstrings( """Albert Model with two heads on top for pre-training: a `masked language modeling` head and a `sentence order prediction` (classification) head. """, ALBERT_START_DOCSTRING, ) class TFAlbertForPreTraining(TFAlbertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.albert = TFAlbertMainLayer(config, name="albert") self.predictions = TFAlbertMLMHead(config, self.albert.embeddings, name="predictions") self.sop_classifier = TFAlbertSOPHead(config, name="sop_classifier") def get_output_embeddings(self): return self.albert.embeddings @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)")) def call(self, inputs, **kwargs): r""" Return: :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs: prediction_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). 
sop_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, 2)`): Prediction scores of the sentence order prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: import tensorflow as tf from transformers import AlbertTokenizer, TFAlbertForPreTraining tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = TFAlbertForPreTraining.from_pretrained('albert-base-v2') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1 outputs = model(input_ids) prediction_scores, sop_scores = outputs[:2] """ outputs = self.albert(inputs, **kwargs) sequence_output, pooled_output = outputs[:2] prediction_scores = self.predictions(sequence_output) sop_scores = self.sop_classifier(pooled_output, training=kwargs.get("training", False)) outputs = (prediction_scores, sop_scores) + outputs[2:] return outputs class TFAlbertSOPHead(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dropout = tf.keras.layers.Dropout(config.classifier_dropout_prob) self.classifier = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier", ) def call(self, pooled_output, training: bool): dropout_pooled_output = self.dropout(pooled_output, training=training) logits = self.classifier(dropout_pooled_output) return logits @add_start_docstrings("""Albert Model with a `language modeling` head on top. """, ALBERT_START_DOCSTRING) class TFAlbertForMaskedLM(TFAlbertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.albert = TFAlbertMainLayer(config, name="albert") self.predictions = TFAlbertMLMHead(config, self.albert.embeddings, name="predictions") def get_output_embeddings(self): return self.albert.embeddings @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)")) @add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="albert-base-v2") def call(self, inputs, **kwargs): r""" Returns: :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs: prediction_scores (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). 
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ outputs = self.albert(inputs, **kwargs) sequence_output = outputs[0] prediction_scores = self.predictions(sequence_output, training=kwargs.get("training", False)) # Add hidden states and attention if they are here outputs = (prediction_scores,) + outputs[2:] return outputs # prediction_scores, (hidden_states), (attentions) @add_start_docstrings( """Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, ALBERT_START_DOCSTRING, ) class TFAlbertForSequenceClassification(TFAlbertPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.albert = TFAlbertMainLayer(config, name="albert") self.dropout = tf.keras.layers.Dropout(config.classifier_dropout_prob) self.classifier = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING) @add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="albert-base-v2") def call( self, inputs=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, labels=None, training=False, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`): Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ..., config.num_labels - 1]``. If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss), If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). Returns: :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs: logits (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, config.num_labels)`) Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ if isinstance(inputs, (tuple, list)): labels = inputs[8] if len(inputs) > 8 else labels if len(inputs) > 8: inputs = inputs[:8] elif isinstance(inputs, (dict, BatchEncoding)): labels = inputs.pop("labels", labels) outputs = self.albert( inputs, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, training=training, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, training=training) logits = self.classifier(pooled_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here if labels is not None: loss = self.compute_loss(labels, logits) outputs = (loss,) + outputs return outputs # (loss), logits, (hidden_states), (attentions) @add_start_docstrings( """Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, ALBERT_START_DOCSTRING, ) class TFAlbertForTokenClassification(TFAlbertPreTrainedModel, TFTokenClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.albert = TFAlbertMainLayer(config, name="albert") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) self.classifier = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING) @add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="albert-base-v2") def call( self, inputs=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, labels=None, training=False, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. Return: :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs: scores (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ if isinstance(inputs, (tuple, list)): labels = inputs[8] if len(inputs) > 8 else labels if len(inputs) > 8: inputs = inputs[:8] elif isinstance(inputs, (dict, BatchEncoding)): labels = inputs.pop("labels", labels) outputs = self.albert( inputs, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, training=training, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output, training=training) logits = self.classifier(sequence_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here if labels is not None: loss = self.compute_loss(labels, logits) outputs = (loss,) + outputs return outputs # (loss), logits, (hidden_states), (attentions) @add_start_docstrings( """Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, ALBERT_START_DOCSTRING, ) class TFAlbertForQuestionAnswering(TFAlbertPreTrainedModel, TFQuestionAnsweringLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.albert = TFAlbertMainLayer(config, name="albert") self.qa_outputs = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" ) @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING) @add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="albert-base-v2") def call( self, inputs=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, start_positions=None, end_positions=None, training=False, ): r""" start_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. Return: :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs: start_scores (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length,)`): Span-start scores (before SoftMax). end_scores (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length,)`): Span-end scores (before SoftMax). 
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ if isinstance(inputs, (tuple, list)): start_positions = inputs[8] if len(inputs) > 8 else start_positions end_positions = inputs[9] if len(inputs) > 9 else end_positions if len(inputs) > 8: inputs = inputs[:8] elif isinstance(inputs, (dict, BatchEncoding)): start_positions = inputs.pop("start_positions", start_positions) end_positions = inputs.pop("end_positions", start_positions) outputs = self.albert( inputs, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, training=training, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) outputs = (start_logits, end_logits,) + outputs[2:] if start_positions is not None and end_positions is not None: labels = {"start_position": start_positions} labels["end_position"] = end_positions loss = self.compute_loss(labels, outputs[:2]) outputs = (loss,) + outputs return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions) @add_start_docstrings( """Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, ALBERT_START_DOCSTRING, ) class TFAlbertForMultipleChoice(TFAlbertPreTrainedModel, TFMultipleChoiceLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.albert = TFAlbertMainLayer(config, name="albert") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) self.classifier = tf.keras.layers.Dense( 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) @property def dummy_inputs(self): """ Dummy inputs to build the network. Returns: tf.Tensor with dummy inputs """ return {"input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)} @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING.format("(batch_size, num_choices, sequence_length)")) @add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="albert-base-v2") def call( self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, labels=None, training=False, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`): Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension of the input tensors. 
(see `input_ids` above) Return: :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs: classification_scores (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, num_choices)`: `num_choices` is the size of the second dimension of the input tensors. (see `input_ids` above). Classification scores (before SoftMax). hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids position_ids = inputs[3] if len(inputs) > 3 else position_ids head_mask = inputs[4] if len(inputs) > 4 else head_mask inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds output_attentions = inputs[6] if len(inputs) > 6 else output_attentions output_hidden_states = inputs[7] if len(inputs) > 7 else output_hidden_states labels = inputs[8] if len(inputs) > 8 else labels assert len(inputs) <= 9, "Too many inputs." elif isinstance(inputs, (dict, BatchEncoding)): input_ids = inputs.get("input_ids") attention_mask = inputs.get("attention_mask", attention_mask) token_type_ids = inputs.get("token_type_ids", token_type_ids) position_ids = inputs.get("position_ids", position_ids) head_mask = inputs.get("head_mask", head_mask) inputs_embeds = inputs.get("inputs_embeds", inputs_embeds) output_attentions = inputs.get("output_attentions", output_attentions) output_hidden_states = inputs.get("output_hidden_states", output_attentions) labels = inputs.get("labels", labels) assert len(inputs) <= 9, "Too many inputs." 
else: input_ids = inputs if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None flat_inputs = [ flat_input_ids, flat_attention_mask, flat_token_type_ids, flat_position_ids, head_mask, inputs_embeds, output_attentions, output_hidden_states, ] outputs = self.albert(flat_inputs, training=training) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, training=training) logits = self.classifier(pooled_output) reshaped_logits = tf.reshape(logits, (-1, num_choices)) outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here if labels is not None: loss = self.compute_loss(labels, reshaped_logits) outputs = (loss,) + outputs return outputs # (loss), reshaped_logits, (hidden_states), (attentions)
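A minimal usage sketch for the sequence-classification head defined above, added for illustration only: the checkpoint name, sentence, and label are placeholders, and it assumes the transformers 3.0.2-era TF API shown in this file (dict inputs may carry the labels, which call() pops before forwarding the rest to the backbone). Running it would download the "albert-base-v2" checkpoint.

import tensorflow as tf
from transformers import AlbertTokenizer, TFAlbertForSequenceClassification

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
model = TFAlbertForSequenceClassification.from_pretrained("albert-base-v2")

# Placeholder sentence and label for a single example.
inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
inputs["labels"] = tf.constant([1])

outputs = model(inputs)
loss, logits = outputs[:2]  # (loss), logits, (hidden_states), (attentions)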
59,568
46.239492
221
py
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/src/transformers/optimization.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch optimization for BERT model.""" import logging import math from typing import Callable, Iterable, Tuple import torch from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR logger = logging.getLogger(__name__) def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1): """ Create a schedule with a constant learning rate, using the learning rate set in optimizer. Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch) def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1): """ Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. num_warmup_steps (:obj:`int`): The number of steps for the warmup phase. last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ def lr_lambda(current_step: int): if current_step < num_warmup_steps: return float(current_step) / float(max(1.0, num_warmup_steps)) return 1.0 return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch) def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1): """ Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer. Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. num_warmup_steps (:obj:`int`): The number of steps for the warmup phase. num_training_steps (:obj:`int`): The totale number of training steps. last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. 
""" def lr_lambda(current_step: int): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) return max( 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)) ) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_cosine_schedule_with_warmup( optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1 ): """ Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. num_warmup_steps (:obj:`int`): The number of steps for the warmup phase. num_training_steps (:obj:`int`): The total number of training steps. num_cycles (:obj:`float`, `optional`, defaults to 0.5): The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0 following a half-cosine). last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_cosine_with_hard_restarts_schedule_with_warmup( optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1 ): """ Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. num_warmup_steps (:obj:`int`): The number of steps for the warmup phase. num_training_steps (:obj:`int`): The total number of training steps. num_cycles (:obj:`int`, `optional`, defaults to 1): The number of hard restarts to use. last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) if progress >= 1.0: return 0.0 return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0)))) return LambdaLR(optimizer, lr_lambda, last_epoch) class AdamW(Optimizer): """ Implements Adam algorithm with weight decay fix as introduced in `Decoupled Weight Decay Regularization <https://arxiv.org/abs/1711.05101>`__. Parameters: params (:obj:`Iterable[torch.nn.parameter.Parameter]`): Iterable of parameters to optimize or dictionaries defining parameter groups. lr (:obj:`float`, `optional`, defaults to 1e-3): The learning rate to use. 
betas (:obj:`Tuple[float,float]`, `optional`, defaults to (0.9, 0.999)): Adam's betas parameters (b1, b2). eps (:obj:`float`, `optional`, defaults to 1e-6): Adam's epsilon for numerical stability. weight_decay (:obj:`float`, `optional`, defaults to 0): Decoupled weight decay to apply. correct_bias (:obj:`bool`, `optional`, defaults to `True`): Whether ot not to correct bias in Adam (for instance, in Bert TF repository they use :obj:`False`). """ def __init__( self, params: Iterable[torch.nn.parameter.Parameter], lr: float = 1e-3, betas: Tuple[float, float] = (0.9, 0.999), eps: float = 1e-6, weight_decay: float = 0.0, correct_bias: bool = True, ): if lr < 0.0: raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1])) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps)) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias) super().__init__(params, defaults) def step(self, closure: Callable = None): """ Performs a single optimization step. Arguments: closure (:obj:`Callable`, `optional`): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group["params"]: if p.grad is None: continue grad = p.grad.data if grad.is_sparse: raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead") state = self.state[p] # State initialization if len(state) == 0: state["step"] = 0 # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values state["exp_avg_sq"] = torch.zeros_like(p.data) exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"] beta1, beta2 = group["betas"] state["step"] += 1 # Decay the first and second moment running average coefficient # In-place operations to update the averages at the same time exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2) denom = exp_avg_sq.sqrt().add_(group["eps"]) step_size = group["lr"] if group["correct_bias"]: # No bias correction for Bert bias_correction1 = 1.0 - beta1 ** state["step"] bias_correction2 = 1.0 - beta2 ** state["step"] step_size = step_size * math.sqrt(bias_correction2) / bias_correction1 p.data.addcdiv_(exp_avg, denom, value=-step_size) # Just adding the square of the weights to the loss function is *not* # the correct way of using L2 regularization/weight decay with Adam, # since that will interact with the m and v parameters in strange ways. # # Instead we want to decay the weights in a manner that doesn't interact # with the m/v parameters. This is equivalent to adding the square # of the weights to the loss with plain (non-momentum) SGD. # Add weight decay at the end (fixed version) if group["weight_decay"] > 0.0: p.data.add_(p.data, alpha=-group["lr"] * group["weight_decay"]) return loss
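A brief, hedged sketch of how the two halves of this file are typically combined: AdamW performs the decoupled-weight-decay update while get_linear_schedule_with_warmup drives the per-step learning rate. The toy linear model, random batch, and step counts are invented so the loop runs on its own.

import torch
from transformers import AdamW, get_linear_schedule_with_warmup

model = torch.nn.Linear(10, 2)  # stand-in for a real network
optimizer = AdamW(model.parameters(), lr=5e-5, weight_decay=0.01)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=100)

for step in range(100):
    x = torch.randn(8, 10)
    y = torch.randint(0, 2, (8,))
    loss = torch.nn.functional.cross_entropy(model(x), y)
    loss.backward()
    optimizer.step()       # AdamW update; decoupled weight decay is applied here
    scheduler.step()       # lr warms up for 10 steps, then decays linearly to 0
    optimizer.zero_grad()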
11,584
42.227612
119
py
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/src/transformers/modeling_mmbt.py
# coding=utf-8 # Copyright (c) Facebook, Inc. and its affiliates. # Copyright (c) HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch MMBT model. """ import logging import torch import torch.nn as nn from torch.nn import CrossEntropyLoss, MSELoss from .file_utils import add_start_docstrings from .modeling_utils import ModuleUtilsMixin logger = logging.getLogger(__name__) class ModalEmbeddings(nn.Module): """Generic Modal Embeddings which takes in an encoder, and a transformer embedding. """ def __init__(self, config, encoder, embeddings): super().__init__() self.config = config self.encoder = encoder self.proj_embeddings = nn.Linear(config.modal_hidden_size, config.hidden_size) self.position_embeddings = embeddings.position_embeddings self.token_type_embeddings = embeddings.token_type_embeddings self.word_embeddings = embeddings.word_embeddings self.LayerNorm = embeddings.LayerNorm self.dropout = nn.Dropout(p=config.hidden_dropout_prob) def forward(self, input_modal, start_token=None, end_token=None, position_ids=None, token_type_ids=None): token_embeddings = self.proj_embeddings(self.encoder(input_modal)) seq_length = token_embeddings.size(1) if start_token is not None: start_token_embeds = self.word_embeddings(start_token) seq_length += 1 token_embeddings = torch.cat([start_token_embeds.unsqueeze(1), token_embeddings], dim=1) if end_token is not None: end_token_embeds = self.word_embeddings(end_token) seq_length += 1 token_embeddings = torch.cat([token_embeddings, end_token_embeds.unsqueeze(1)], dim=1) if position_ids is None: position_ids = torch.arange(seq_length, dtype=torch.long, device=input_modal.device) position_ids = position_ids.unsqueeze(0).expand(input_modal.size(0), seq_length) if token_type_ids is None: token_type_ids = torch.zeros( (input_modal.size(0), seq_length), dtype=torch.long, device=input_modal.device ) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = token_embeddings + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings MMBT_START_DOCSTRING = r""" MMBT model was proposed in `Supervised Multimodal Bitransformers for Classifying Images and Text`_ by Douwe Kiela, Suvrat Bhooshan, Hamed Firooz, Davide Testuggine. It's a supervised multimodal bitransformer model that fuses information from text and other image encoders, and obtain state-of-the-art performance on various multimodal classification benchmark tasks. This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. .. _`Supervised Multimodal Bitransformers for Classifying Images and Text`: https://github.com/facebookresearch/mmbt .. 
_`torch.nn.Module`: https://pytorch.org/docs/stable/nn.html#module Parameters: config (:class:`~transformers.MMBTConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. transformer (:class: `~nn.Module`): A text transformer that is used by MMBT. It should have embeddings, encoder, and pooler attributes. encoder (:class: `~nn.Module`): Encoder for the second modality. It should take in a batch of modal inputs and return k, n dimension embeddings. """ MMBT_INPUTS_DOCSTRING = r""" Inputs: **input_modal**: ``torch.FloatTensor`` of shape ``(batch_size, ***)``: The other modality data. It will be the shape that the encoder for that type expects. e.g. With an Image Encoder, the shape would be (batch_size, channels, height, width) **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Indices of input sequence tokens in the vocabulary. It does not expect [CLS] token to be added as it's appended to the end of other modality embeddings. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. **modal_start_tokens**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Optional start token to be added to Other Modality Embedding. [CLS] Most commonly used for Classification tasks. **modal_end_tokens**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Optional end token to be added to Other Modality Embedding. [SEP] Most commonly used. **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``: Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Segment token indices to indicate different portions of the inputs. **modal_token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, modal_sequence_length)``: Segment token indices to indicate different portions of the non-text modality. The embeddings from these tokens will be summed with the respective token embeddings for the non-text modality. **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Indices of positions of each input sequence tokens in the position embeddings. **modal_position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, modal_sequence_length)``: Indices of positions of each input sequence tokens in the position embeddings for the non-text modality. **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. **inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. 
**encoder_hidden_states**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``: Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. **encoder_attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``: Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`): If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail. """ @add_start_docstrings( "The bare MMBT Model outputting raw hidden-states without any specific head on top.", MMBT_START_DOCSTRING, MMBT_INPUTS_DOCSTRING, ) class MMBTModel(nn.Module, ModuleUtilsMixin): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)`` Sequence of hidden-states at the output of the last layer of the model. **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)`` Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during Bert pretraining. This output is usually *not* a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence. **hidden_states**: (`optional`, returned when ``output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: # For example purposes. Not runnable. 
transformer = BertModel.from_pretrained('bert-base-uncased') encoder = ImageEncoder(args) mmbt = MMBTModel(config, transformer, encoder) """ def __init__(self, config, transformer, encoder): super().__init__() self.config = config self.transformer = transformer self.modal_encoder = ModalEmbeddings(config, encoder, transformer.embeddings) def forward( self, input_modal, input_ids=None, modal_start_tokens=None, modal_end_tokens=None, attention_mask=None, token_type_ids=None, modal_token_type_ids=None, position_ids=None, modal_position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_hidden_states=None, ): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_txt_shape = input_ids.size() elif inputs_embeds is not None: input_txt_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device modal_embeddings = self.modal_encoder( input_modal, start_token=modal_start_tokens, end_token=modal_end_tokens, position_ids=modal_position_ids, token_type_ids=modal_token_type_ids, ) input_modal_shape = modal_embeddings.size()[:-1] if token_type_ids is None: token_type_ids = torch.ones(input_txt_shape, dtype=torch.long, device=device) txt_embeddings = self.transformer.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds ) embedding_output = torch.cat([modal_embeddings, txt_embeddings], 1) input_shape = embedding_output.size()[:-1] if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) else: attention_mask = torch.cat( [torch.ones(input_modal_shape, device=device, dtype=torch.long), attention_mask], dim=1 ) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(input_shape, device=device) else: encoder_attention_mask = torch.cat( [torch.ones(input_modal_shape, device=device), encoder_attention_mask], dim=1 ) extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, self.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) encoder_outputs = self.transformer.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, output_hidden_states=output_hidden_states, ) sequence_output = encoder_outputs[0] pooled_output = self.transformer.pooler(sequence_output) outputs = (sequence_output, pooled_output,) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions) def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value @add_start_docstrings( """MMBT Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)""", MMBT_START_DOCSTRING, MMBT_INPUTS_DOCSTRING, ) class MMBTForClassification(nn.Module): r""" **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ..., config.num_labels - 1]``. 
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss), If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Classification (or regression if config.num_labels==1) loss. **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)`` Classification (or regression if config.num_labels==1) scores (before SoftMax). **hidden_states**: (`optional`, returned when ``output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: # For example purposes. Not runnable. transformer = BertModel.from_pretrained('bert-base-uncased') encoder = ImageEncoder(args) model = MMBTForClassification(config, transformer, encoder) outputs = model(input_modal, input_ids, labels=labels) loss, logits = outputs[:2] """ def __init__(self, config, transformer, encoder): super().__init__() self.num_labels = config.num_labels self.mmbt = MMBTModel(config, transformer, encoder) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) def forward( self, input_modal, input_ids=None, modal_start_tokens=None, modal_end_tokens=None, attention_mask=None, token_type_ids=None, modal_token_type_ids=None, position_ids=None, modal_position_ids=None, head_mask=None, inputs_embeds=None, labels=None, ): outputs = self.mmbt( input_modal=input_modal, input_ids=input_ids, modal_start_tokens=modal_start_tokens, modal_end_tokens=modal_end_tokens, attention_mask=attention_mask, token_type_ids=token_type_ids, modal_token_type_ids=modal_token_type_ids, position_ids=position_ids, modal_position_ids=modal_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) outputs = (loss,) + outputs return outputs # (loss), logits, (hidden_states), (attentions)
18,628
49.077957
151
py
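The MMBT docstrings in the preceding record flag their example as "not runnable"; the following is a hedged, untested sketch of the same wiring. It assumes the MMBTConfig/MMBTForClassification classes exported by transformers 3.0.2, and the TabularEncoder class, toy sizes, and random tensors are invented purely for illustration, not taken from the source.

# Hedged sketch only: wiring MMBT from the pieces described above, under
# transformers 3.0.2. TabularEncoder, the toy config sizes, and the random
# tensors are invented for illustration; a real setup would use a pretrained
# BertModel and a proper image/feature encoder.
import torch
import torch.nn as nn
from transformers import BertConfig, BertModel, MMBTConfig, MMBTForClassification


class TabularEncoder(nn.Module):
    """Hypothetical second-modality encoder: one flat feature vector -> one modal token."""

    def __init__(self, in_features, modal_hidden_size):
        super().__init__()
        self.proj = nn.Linear(in_features, modal_hidden_size)

    def forward(self, input_modal):
        # (batch, in_features) -> (batch, k=1, modal_hidden_size), the shape MMBT expects
        return self.proj(input_modal).unsqueeze(1)


text_config = BertConfig(
    vocab_size=100, hidden_size=64, num_hidden_layers=2, num_attention_heads=2, intermediate_size=128
)
transformer = BertModel(text_config)  # in practice: BertModel.from_pretrained(...)
config = MMBTConfig(text_config, num_labels=2, modal_hidden_size=32)
model = MMBTForClassification(config, transformer, TabularEncoder(in_features=10, modal_hidden_size=32))

input_modal = torch.randn(4, 10)                               # non-text modality
input_ids = torch.randint(0, text_config.vocab_size, (4, 12))  # token ids
labels = torch.tensor([0, 1, 0, 1])
loss, logits = model(input_modal, input_ids=input_ids, labels=labels)[:2]
print(loss.item(), logits.shape)  # scalar loss, (4, 2) logits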
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/src/transformers/modeling_marian.py
# coding=utf-8
# Copyright 2020 Marian Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch MarianMTModel model, ported from the Marian C++ repo."""

from transformers.modeling_bart import BartForConditionalGeneration


MARIAN_PRETRAINED_MODEL_ARCHIVE_LIST = [
    # See all Marian models at https://huggingface.co/models?search=Helsinki-NLP
]


class MarianMTModel(BartForConditionalGeneration):
    r"""
    Pytorch version of marian-nmt's transformer.h (c++). Designed for the OPUS-NMT translation checkpoints.
    Model API is identical to BartForConditionalGeneration.
    Available models are listed at `Model List <https://huggingface.co/models?search=Helsinki-NLP>`__

    Examples::

        >>> from transformers import MarianTokenizer, MarianMTModel
        >>> from typing import List
        >>> src = 'fr'  # source language
        >>> trg = 'en'  # target language
        >>> sample_text = "où est l'arrêt de bus ?"
        >>> mname = f'Helsinki-NLP/opus-mt-{src}-{trg}'
        >>> model = MarianMTModel.from_pretrained(mname)
        >>> tok = MarianTokenizer.from_pretrained(mname)
        >>> batch = tok.prepare_translation_batch(src_texts=[sample_text])  # don't need tgt_text for inference
        >>> gen = model.generate(**batch)  # for forward pass: model(**batch)
        >>> words: List[str] = tok.batch_decode(gen, skip_special_tokens=True)  # returns "Where is the the bus stop ?"
    """

    def adjust_logits_during_generation(self, logits, cur_len, max_length):
        logits[:, self.config.pad_token_id] = float("-inf")
        if cur_len == max_length - 1 and self.config.eos_token_id is not None:
            self._force_token_ids_generation(logits, self.config.eos_token_id)
        return logits
2,289
41.407407
119
py
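The Marian record above documents the intended inference pattern in its docstring; the sketch below simply applies that same pattern to a small batch with beam search. It assumes network access to download the Helsinki-NLP/opus-mt-fr-en checkpoint, and the exact output strings depend on the downloaded weights.

# Hedged usage sketch for MarianMTModel under transformers 3.0.2; mirrors the
# docstring pattern above, extended to a batch of inputs with beam search.
from typing import List

from transformers import MarianMTModel, MarianTokenizer

model_name = "Helsinki-NLP/opus-mt-fr-en"  # assumed public checkpoint name
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)

src_texts = ["où est l'arrêt de bus ?", "merci beaucoup"]
batch = tokenizer.prepare_translation_batch(src_texts=src_texts)  # no tgt_texts needed for inference
generated = model.generate(**batch, num_beams=4, max_length=40)
translations: List[str] = tokenizer.batch_decode(generated, skip_special_tokens=True)
print(translations)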
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/src/transformers/configuration_utils.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Configuration base class and utilities.""" import copy import json import logging import os from typing import Dict, Tuple from .file_utils import CONFIG_NAME, cached_path, hf_bucket_url, is_remote_url logger = logging.getLogger(__name__) class PretrainedConfig(object): r""" Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as methods for loading/downloading/saving configurations. Note: A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to initialize a model does **not** load the model weights. It only affects the model's configuration. Class attributes (overridden by derived classes): - ``model_type``: a string that identifies the model type, that we serialize into the JSON file, and that we use to recreate the correct object in :class:`~transformers.AutoConfig`. Args: finetuning_task (:obj:`string` or :obj:`None`, `optional`, defaults to :obj:`None`): Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow or PyTorch) checkpoint. num_labels (:obj:`int`, `optional`, defaults to `2`): Number of classes to use when the model is a classification model (sequences/tokens) output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`False`): Should the model returns all hidden-states. output_attentions (:obj:`bool`, `optional`, defaults to :obj:`False`): Should the model returns all attentions. torchscript (:obj:`bool`, `optional`, defaults to :obj:`False`): Is the model used with Torchscript (for PyTorch models). 
""" model_type: str = "" def __init__(self, **kwargs): # Attributes with defaults self.output_hidden_states = kwargs.pop("output_hidden_states", False) self.output_attentions = kwargs.pop("output_attentions", False) self.use_cache = kwargs.pop("use_cache", True) # Not used by all models self.torchscript = kwargs.pop("torchscript", False) # Only used by PyTorch models self.use_bfloat16 = kwargs.pop("use_bfloat16", False) self.pruned_heads = kwargs.pop("pruned_heads", {}) # Is decoder is used in encoder-decoder models to differentiate encoder from decoder self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False) self.is_decoder = kwargs.pop("is_decoder", False) # Parameters for sequence generation self.max_length = kwargs.pop("max_length", 20) self.min_length = kwargs.pop("min_length", 0) self.do_sample = kwargs.pop("do_sample", False) self.early_stopping = kwargs.pop("early_stopping", False) self.num_beams = kwargs.pop("num_beams", 1) self.temperature = kwargs.pop("temperature", 1.0) self.top_k = kwargs.pop("top_k", 50) self.top_p = kwargs.pop("top_p", 1.0) self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0) self.length_penalty = kwargs.pop("length_penalty", 1.0) self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0) self.bad_words_ids = kwargs.pop("bad_words_ids", None) self.num_return_sequences = kwargs.pop("num_return_sequences", 1) # Fine-tuning task arguments self.architectures = kwargs.pop("architectures", None) self.finetuning_task = kwargs.pop("finetuning_task", None) self.id2label = kwargs.pop("id2label", None) self.label2id = kwargs.pop("label2id", None) if self.id2label is not None: kwargs.pop("num_labels", None) self.id2label = dict((int(key), value) for key, value in self.id2label.items()) # Keys are always strings in JSON so convert ids to int here. else: self.num_labels = kwargs.pop("num_labels", 2) # Tokenizer arguments TODO: eventually tokenizer and models should share the same config self.prefix = kwargs.pop("prefix", None) self.bos_token_id = kwargs.pop("bos_token_id", None) self.pad_token_id = kwargs.pop("pad_token_id", None) self.eos_token_id = kwargs.pop("eos_token_id", None) self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None) # task specific arguments self.task_specific_params = kwargs.pop("task_specific_params", None) # TPU arguments self.xla_device = kwargs.pop("xla_device", None) # Additional attributes without default values for key, value in kwargs.items(): try: setattr(self, key, value) except AttributeError as err: logger.error("Can't set {} with value {} for {}".format(key, value, self)) raise err @property def num_labels(self): return len(self.id2label) @num_labels.setter def num_labels(self, num_labels): self.id2label = {i: "LABEL_{}".format(i) for i in range(num_labels)} self.label2id = dict(zip(self.id2label.values(), self.id2label.keys())) def save_pretrained(self, save_directory): """ Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the :func:`~transformers.PretrainedConfig.from_pretrained` class method. Args: save_directory (:obj:`string`): Directory where the configuration JSON file will be saved. 
""" if os.path.isfile(save_directory): raise AssertionError("Provided path ({}) should be a directory, not a file".format(save_directory)) os.makedirs(save_directory, exist_ok=True) # If we save using the predefined names, we can load using `from_pretrained` output_config_file = os.path.join(save_directory, CONFIG_NAME) self.to_json_file(output_config_file, use_diff=True) logger.info("Configuration saved in {}".format(output_config_file)) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig": r""" Instantiate a :class:`~transformers.PretrainedConfig` (or a derived class) from a pre-trained model configuration. Args: pretrained_model_name_or_path (:obj:`string`): either: - a string with the `shortcut name` of a pre-trained model configuration to load from cache or download, e.g.: ``bert-base-uncased``. - a string with the `identifier name` of a pre-trained model configuration that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing a configuration file saved using the :func:`~transformers.PretrainedConfig.save_pretrained` method, e.g.: ``./my_model_directory/``. - a path or url to a saved configuration JSON `file`, e.g.: ``./my_model_directory/configuration.json``. cache_dir (:obj:`string`, `optional`): Path to a directory in which a downloaded pre-trained model configuration should be cached if the standard cache should not be used. kwargs (:obj:`Dict[str, any]`, `optional`): The values in kwargs of any keys which are configuration attributes will be used to override the loaded values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled by the `return_unused_kwargs` keyword parameter. force_download (:obj:`bool`, `optional`, defaults to :obj:`False`): Force to (re-)download the model weights and configuration files and override the cached versions if they exist. resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`): Do not delete incompletely recieved file. Attempt to resume the download if such a file exists. proxies (:obj:`Dict`, `optional`): A dictionary of proxy servers to use by protocol or endpoint, e.g.: :obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. return_unused_kwargs: (`optional`) bool: If False, then this function returns just the final configuration object. If True, then this functions returns a :obj:`Tuple(config, unused_kwargs)` where `unused_kwargs` is a dictionary consisting of the key/value pairs whose keys are not configuration attributes: ie the part of kwargs which has not been used to update `config` and is otherwise ignored. Returns: :class:`PretrainedConfig`: An instance of a configuration object Examples:: # We can't instantiate directly the base class `PretrainedConfig` so let's show the examples on a # derived class: BertConfig config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache. config = BertConfig.from_pretrained('./test/saved_model/') # E.g. 
config (or model) was saved using `save_pretrained('./test/saved_model/')` config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json') config = BertConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False) assert config.output_attention == True config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False, return_unused_kwargs=True) assert config.output_attention == True assert unused_kwargs == {'foo': False} """ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) return cls.from_dict(config_dict, **kwargs) @classmethod def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs) -> Tuple[Dict, Dict]: """ From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a Config using `from_dict`. Parameters: pretrained_model_name_or_path (:obj:`string`): The identifier of the pre-trained checkpoint from which we want the dictionary of parameters. Returns: :obj:`Tuple[Dict, Dict]`: The dictionary that will be used to instantiate the configuration object. """ cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) if os.path.isdir(pretrained_model_name_or_path): config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME) elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): config_file = pretrained_model_name_or_path else: config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False) try: # Load from URL or cache if already cached resolved_config_file = cached_path( config_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, ) # Load config dict if resolved_config_file is None: raise EnvironmentError config_dict = cls._dict_from_json_file(resolved_config_file) except EnvironmentError: msg = ( f"Can't load config for '{pretrained_model_name_or_path}'. Make sure that:\n\n" f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n" f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a {CONFIG_NAME} file\n\n" ) raise EnvironmentError(msg) except json.JSONDecodeError: msg = ( "Couldn't reach server at '{}' to download configuration file or " "configuration file is not a valid JSON file. " "Please check network or file content here: {}.".format(config_file, resolved_config_file) ) raise EnvironmentError(msg) if resolved_config_file == config_file: logger.info("loading configuration file {}".format(config_file)) else: logger.info("loading configuration file {} from cache at {}".format(config_file, resolved_config_file)) return config_dict, kwargs @classmethod def from_dict(cls, config_dict: Dict, **kwargs) -> "PretrainedConfig": """ Constructs a `Config` from a Python dictionary of parameters. Args: config_dict (:obj:`Dict[str, any]`): Dictionary that will be used to instantiate the configuration object. Such a dictionary can be retrieved from a pre-trained checkpoint by leveraging the :func:`~transformers.PretrainedConfig.get_config_dict` method. kwargs (:obj:`Dict[str, any]`): Additional parameters from which to initialize the configuration object. 
Returns: :class:`PretrainedConfig`: An instance of a configuration object """ return_unused_kwargs = kwargs.pop("return_unused_kwargs", False) config = cls(**config_dict) if hasattr(config, "pruned_heads"): config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items()) # Update config with kwargs if needed to_remove = [] for key, value in kwargs.items(): if hasattr(config, key): setattr(config, key, value) to_remove.append(key) for key in to_remove: kwargs.pop(key, None) logger.info("Model config %s", str(config)) if return_unused_kwargs: return config, kwargs else: return config @classmethod def from_json_file(cls, json_file: str) -> "PretrainedConfig": """ Constructs a `Config` from the path to a json file of parameters. Args: json_file (:obj:`string`): Path to the JSON file containing the parameters. Returns: :class:`PretrainedConfig`: An instance of a configuration object """ config_dict = cls._dict_from_json_file(json_file) return cls(**config_dict) @classmethod def _dict_from_json_file(cls, json_file: str): with open(json_file, "r", encoding="utf-8") as reader: text = reader.read() return json.loads(text) def __eq__(self, other): return self.__dict__ == other.__dict__ def __repr__(self): return "{} {}".format(self.__class__.__name__, self.to_json_string()) def to_diff_dict(self): """ Removes all attributes from config which correspond to the default config attributes for better readability and serializes to a Python dictionary. Returns: :obj:`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance, """ config_dict = self.to_dict() # get the default config dict default_config_dict = PretrainedConfig().to_dict() serializable_config_dict = {} # only serialize values that differ from the default config for key, value in config_dict.items(): if key not in default_config_dict or value != default_config_dict[key]: serializable_config_dict[key] = value return serializable_config_dict def to_dict(self): """ Serializes this instance to a Python dictionary. Returns: :obj:`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance, """ output = copy.deepcopy(self.__dict__) if hasattr(self.__class__, "model_type"): output["model_type"] = self.__class__.model_type return output def to_json_string(self, use_diff=True): """ Serializes this instance to a JSON string. Args: use_diff (:obj:`bool`): If set to True, only the difference between the config instance and the default PretrainedConfig() is serialized to JSON string. Returns: :obj:`string`: String containing all the attributes that make up this configuration instance in JSON format. """ if use_diff is True: config_dict = self.to_diff_dict() else: config_dict = self.to_dict() return json.dumps(config_dict, indent=2, sort_keys=True) + "\n" def to_json_file(self, json_file_path, use_diff=True): """ Save this instance to a json file. Args: json_file_path (:obj:`string`): Path to the JSON file in which this configuration instance's parameters will be saved. use_diff (:obj:`bool`): If set to True, only the difference between the config instance and the default PretrainedConfig() is serialized to JSON file. """ with open(json_file_path, "w", encoding="utf-8") as writer: writer.write(self.to_json_string(use_diff=use_diff)) def update(self, config_dict: Dict): """ Updates attributes of this class with attributes from `config_dict`. Args: :obj:`Dict[str, any]`: Dictionary of attributes that shall be updated for this class. 
""" for key, value in config_dict.items(): setattr(self, key, value)
19,039
45.552567
193
py
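The configuration record above centres on the save_pretrained/from_pretrained round trip and kwarg overrides; the sketch below exercises exactly those paths locally with BertConfig (a concrete PretrainedConfig subclass), using a temporary directory so no checkpoint download is needed.

# Hedged sketch of the PretrainedConfig round trip described above, using
# BertConfig as the concrete subclass. Runs locally with a temporary directory.
import tempfile

from transformers import BertConfig

config = BertConfig(num_hidden_layers=3)

with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)  # writes tmp_dir/config.json
    reloaded = BertConfig.from_pretrained(tmp_dir)
    assert reloaded.num_hidden_layers == 3

    # kwargs that are config attributes override the loaded values; unknown
    # keys come back separately when return_unused_kwargs=True.
    overridden, unused = BertConfig.from_pretrained(
        tmp_dir, output_attentions=True, foo=False, return_unused_kwargs=True
    )
    assert overridden.output_attentions is True
    assert unused == {"foo": False}

# to_json_string(use_diff=True) serializes only attributes that differ from
# the base PretrainedConfig defaults (plus model-specific ones).
print(config.to_json_string(use_diff=True))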
TextSiM
TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/src/transformers/convert_longformer_original_pytorch_lightning_to_pytorch.py
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Longformer checkpoint."""


import argparse

import pytorch_lightning as pl
import torch

from transformers.modeling_longformer import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = torch.nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):

    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print("Conversion successful. Model saved under {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path to the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
3,037
33.91954
117
py
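The conversion record above is normally driven by its argparse entry point; the sketch below calls the underlying function directly instead. All paths are placeholders, the model identifier is assumed to be the public allenai/longformer-base-4096 checkpoint, pytorch_lightning must be installed, and the checkpoint file must contain a "state_dict" whose keys match the LightningModel defined in the script (model.* and qa_outputs.*).

# Hedged sketch: calling the conversion function directly. Paths are
# placeholders, not real files; adjust them to your own checkpoint layout.
from transformers.convert_longformer_original_pytorch_lightning_to_pytorch import (
    convert_longformer_qa_checkpoint_to_pytorch,
)

convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model="allenai/longformer-base-4096",                 # assumed hub identifier
    longformer_question_answering_ckpt_path="checkpoints/qa.ckpt",   # placeholder Lightning checkpoint
    pytorch_dump_folder_path="converted_longformer_qa",              # placeholder output directory
)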